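"""bake.py: build the static site.

Reads series metadata from INPUT_DIRECTORY, assembles letter and tag
indexes, and renders the output pages into OUTPUT_DIRECTORY.
"""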
import os
import json
import shutil
import string
from glob import glob
from slugify import slugify
from tqdm import tqdm
import statistics

from config import INPUT_DIRECTORY, OUTPUT_DIRECTORY, DOWNLOAD_FILES, CONTENT_RATINGS
from render import render_chapter_pages, render_series_pages, render_index_pages, render_home, render_style

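# Tags stripped from every series before indexing.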
BAD_TAGS = [
    "girls-love"
]

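# Slugs already assigned to a series, used to avoid URL collisions.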
SERIES_LOC = set()

def chapter_loc(loc, i):
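    """Return the URL path for the chapter at index i, numbered from 1 in hex."""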
    return f"{loc}/{int(i)+1:x}"

def _loc_with_num(loc, num):
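    """Append ".num" to loc; num == 0 returns loc unchanged."""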
    if num == 0:
        return loc
    return f"{loc}.{num}"

def series_loc(series):
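    """Return a unique slug for the series, adding a numeric suffix on collisions."""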

    loc = slugify(series["sort_name"])
    original_loc = loc

    i = 0
    loc = _loc_with_num(original_loc, i)
    while loc in SERIES_LOC:
        i += 1
        loc = _loc_with_num(original_loc, i)

    SERIES_LOC.add(loc)

    return loc


def parse_metadata(metadata, series_dir):
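    """Turn one manga_info.json entry into the series record used by the renderers."""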
    result = {}

    result["id"] = metadata["id"]

    result["name"] = metadata["title"]
    result["rating"] = metadata["rating"]

    assert result["rating"] in CONTENT_RATINGS

    sort_name = metadata["title"].lower()
    if sort_name.startswith("the "):
        sort_name = sort_name[4:]
    result["sort_name"] = sort_name
    loc = series_loc(result)
    result["location"] = loc
    result["description"] = metadata["description"]
    result["cover"] = glob(os.path.join(series_dir, "cover.*"))[0]
    tags = [slugify(tag) for tag in metadata["tags"]]

    for tag in BAD_TAGS:
        if tag in tags:
            tags.remove(tag)

    tags.append("all")

    result["tags"] = tags

    result["score"] = get_score(metadata["id"])

    chapters = []
    for i, chapter_data in enumerate(metadata["chapters"]):

        chapter_name = chapter_data["name"]
        chapter_dir = chapter_name.replace("Volume.", "Vol.").replace("Chapter.", "Ch.")

        chapter = {
            "index": i,
            "name": chapter_name,
            "location": chapter_loc(loc, i),
            "images": sorted(
                im for im in glob(os.path.join(series_dir, chapter_dir, "*"))
                if im.endswith((".webp", ".png", ".jpeg", ".jpg"))
            ),
        }

        if len(chapter["images"]) == 0:
            continue

        chapters.append(chapter)

    # Add prev/next links between consecutive non-empty chapters. Use the
    # stored locations so skipped (empty) chapters do not break the links.
    for i, chapter in enumerate(chapters):
        if i > 0:
            chapter["prev"] = chapters[i - 1]["location"]

        if i < len(chapters) - 1:
            chapter["next"] = chapters[i + 1]["location"]

    result["chapters"] = chapters


    return result

def create_index(directory):
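    """Build a series record for every folder in directory that has manga_info.json and a cover image."""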
    index = []

    i = 0

    for dirname in tqdm(os.listdir(directory)):
        series_dir = os.path.join(directory, dirname)

        try:
            with open(os.path.join(series_dir, "manga_info.json")) as f:
                metadata = json.load(f)[0]
        except (FileNotFoundError, PermissionError):
            continue

        if not glob(os.path.join(series_dir, "cover.*")):
            continue

        series_data = parse_metadata(metadata, series_dir)

        first_letter = series_data["sort_name"][0].lower()
        series_data["index_letter"] = first_letter if first_letter in string.ascii_lowercase else "#"

        index.append(series_data)

        i += 1
        # Debug aid: uncomment to bake only the first 100 series.
        # if i > 100:
        #     break

    return index

def load_scores():
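    """Parse each DOWNLOAD_FILES ranking list into a dict mapping manga id to a rank-based score."""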

    score_files = []

    for filename in DOWNLOAD_FILES:
        with open(filename) as f:
            lines = f.read().splitlines()

        # Each line ends in the manga id (last path component); earlier
        # lines receive higher scores.
        scores = {}
        for i, line in enumerate(lines):
            manga_id = line.strip().split("/")[-1]
            scores[manga_id] = len(lines) - i

        score_files.append(scores)

    return score_files

def get_score(manga_id):
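    """Return the mean score of manga_id across all loaded score files."""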
    # "scores" is the list of per-file score dicts built in __main__.
    manga_scores = [
        score_file[manga_id] for score_file in scores if manga_id in score_file
    ]

    # A series absent from every ranking file falls back to the lowest score.
    if not manga_scores:
        return 0

    return statistics.mean(manga_scores)

def _filter_index(original_index, r, keep_empty):
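    """Keep only series whose rating equals r (None keeps everything).

    Groups that end up empty are dropped unless keep_empty is set.
    """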
    if r is None:
        return original_index

    new_index = {}

    for key, s in original_index.items():
        new_list = list(filter(lambda x: x["rating"] == r, s))
        if new_list or keep_empty:
            new_index[key] = new_list

    return new_index


if __name__ == "__main__":

    scores = load_scores()

    print("create index")
    index = create_index(INPUT_DIRECTORY)

    print("Prepare output folder")
    shutil.rmtree(OUTPUT_DIRECTORY, ignore_errors=True)
    os.makedirs(OUTPUT_DIRECTORY)

    # Letter index
    letter_index = {}
    for letter in string.ascii_lowercase + "#":
        letter_index[letter] = []

        for series in index:
            if series["index_letter"] == letter:
                letter_index[letter].append(series)

    # Tag index
    all_tags = set()
    for series in index:
        all_tags.update(series["tags"])
    all_tags = list(sorted(all_tags))

    tag_index = {}
    for tag in all_tags:
        tag_index[tag] = []

        for series in index:
            if tag in series["tags"]:
                tag_index[tag].append(series)

    render_home(OUTPUT_DIRECTORY)
    render_style(OUTPUT_DIRECTORY)
    print("series")
    render_series_pages(OUTPUT_DIRECTORY, index)
    print("chapters")
    render_chapter_pages(OUTPUT_DIRECTORY, index)


    print("letter index")
    for rating in [None] + CONTENT_RATINGS:
        filtered_letter_index = _filter_index(letter_index, rating, keep_empty=True)
        render_index_pages(OUTPUT_DIRECTORY, "i", filtered_letter_index, rating)

    print("tag index")
    for rating in [None] + CONTENT_RATINGS:
        filtered_tag_index = _filter_index(tag_index, rating, keep_empty=False)
        render_index_pages(OUTPUT_DIRECTORY, "t", filtered_tag_index, rating)

    for asset in ["lexend.woff2", "home.mp4", "favicon.ico"]:
        shutil.copy(os.path.join("assets", asset), OUTPUT_DIRECTORY)