From 826c0fcdcd88ccb08aca0ee24d7d1054898eff62 Mon Sep 17 00:00:00 2001
From: Hashblen
Date: Sun, 31 Dec 2023 17:36:56 +0100
Subject: [PATCH 1/5] Bit more lax requirements

---
 Pipfile | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Pipfile b/Pipfile
index b23d425..2b9d86f 100644
--- a/Pipfile
+++ b/Pipfile
@@ -13,4 +13,3 @@ openpyxl = "*"

 [requires]
 python_version = "3.10"
-python_full_version = "3.10.4"

From 29e594769e0b4c3b0fc0f3a2e5d88f98f6f2ed56 Mon Sep 17 00:00:00 2001
From: Hashblen
Date: Sun, 31 Dec 2023 17:37:25 +0100
Subject: [PATCH 2/5] Fix errors parsing fandom banners

---
 banner_parser.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/banner_parser.py b/banner_parser.py
index 8dae216..53c2e7e 100644
--- a/banner_parser.py
+++ b/banner_parser.py
@@ -24,12 +24,16 @@ def load_page():


 def load_page_static():
-    with open('banner_history_wiki\Wish History _ Genshin Impact Wiki _ Fandom.html', 'r') as f:
-        return f.read()
+    try:
+        with open('banner_history_wiki\Wish History _ Genshin Impact Wiki _ Fandom.html', 'r') as f:
+            return f.read()
+    except UnicodeDecodeError as e:
+        with open('banner_history_wiki\Wish History _ Genshin Impact Wiki _ Fandom.html', 'r', encoding="utf8") as f:
+            return f.read()


 def parse_table(table: Tag):
-    header = table.find('thead')
+    header = table.find('tbody')  # thead doesn't exist for me
     header_row = header.find('tr')
     header_cells = header_row.find_all('th')
     header_titles = [x.text.strip() for x in header_cells]
@@ -39,7 +43,7 @@ def parse_table(table: Tag):
         return None

     body = table.find('tbody')
-    body_rows = body.find_all('tr')
+    body_rows = body.find_all('tr')[1:]  # as thead doesn't exist for me, I add [1:]

     reset_time = time(hour=5, minute=0, second=0)  # UTC+1 == Europe
@@ -89,7 +93,7 @@ def parse_page(page_text):
     results = []

     soup = BeautifulSoup(page_text, 'lxml')
-    tables = soup.find_all('table', class_='article-table alternating-colors-table sortable jquery-tablesorter')
+    tables = soup.find_all('table', class_='article-table alternating-colors-table sortable')  # jquery-tablesorter is added after opening the page; it doesn't exist when you wget.

     for table in tables:
         results += parse_table(table)

From 6768bd748fd01968702bcd32eff43254442d1d59 Mon Sep 17 00:00:00 2001
From: Hashblen
Date: Sun, 31 Dec 2023 17:58:52 +0100
Subject: [PATCH 3/5] Small fix for generated file to be accepted by paimon.moe

---
 main.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/main.py b/main.py
index 3ff0afd..3f2afdd 100644
--- a/main.py
+++ b/main.py
@@ -155,6 +155,9 @@ def generate_history():

     fill_history(wb, banners, reversed(genshin_wish_history['history']))

+    # Information sheet needs to have "Paimon.moe Wish History Export" in A1 or the import fails, according to paimon.moe's source code
+    wb['Information']['A1'] = 'Paimon.moe Wish History Export'
+
     wb.save('generated_history.xlsx')
     return

From 732f382d7b7c53dfedc3bdd3df1fd6e640913b1c Mon Sep 17 00:00:00 2001
From: Hashblen
Date: Sun, 31 Dec 2023 18:38:09 +0100
Subject: [PATCH 4/5] Better readme + download fandom page by default

---
 banner_parser.py | 2 +-
 main.py          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/banner_parser.py b/banner_parser.py
index 53c2e7e..56496e2 100644
--- a/banner_parser.py
+++ b/banner_parser.py
@@ -101,7 +101,7 @@


 def main():
-    page = load_page_static()
+    page = load_page()
     banners = parse_page(page)
     with open('banner_history.json', 'w') as f:
         json.dump(banners, f, indent=4)
diff --git a/main.py b/main.py
index 3f2afdd..0892539 100644
--- a/main.py
+++ b/main.py
@@ -125,7 +125,7 @@ def fill_history(wb: Workbook, banners: list[dict], wish_history: list[dict]):
         entry.append(roll_count[banner['name']])

         # Group
-        entry.append(roll_count[banner['name']])  # IDK what this is
+        entry.append(roll_count[banner['name']])  # Example: if it's a 10-pull, they all belong to one group. Resets for each banner.

         # Banner
         entry.append(banner['name'])

From 53ec2b5df814d61fed260b30da4f61bf3356526c Mon Sep 17 00:00:00 2001
From: Hashblen
Date: Sun, 31 Dec 2023 18:41:33 +0100
Subject: [PATCH 5/5] Better readme

---
 README.md | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 8c8fe31..aa09978 100644
--- a/README.md
+++ b/README.md
@@ -2,10 +2,20 @@ Export your history from hotgames.gg and import it to paimon.moe tool

 ## Setting up
+0. If you don't have pipenv, run `pip install pipenv`
 1. `git clone https://github.com/mostm/paimon_moe_importing.git`
 2. `cd paimon_moe_importing`
 3. `pipenv sync`

+## Getting the history from hotgames.gg
+1. Go to https://genshin.hotgames.gg/wish-counter
+2. Press F12 to open the developer tools and go to the `Network` tab.
+3. While it is open, refresh the page.
+4. After a few seconds, use the `Filter by URL` bar and type `genshin_wish_history`.
+5. Right-click on the request that appears and click `Open in new tab`.
+6. Press `Ctrl + S` and save the JSON file in the `paimon_moe_importing` folder without renaming it (it should be called `genshin_wish_history.json`).
+
 ## Running
 - Execute `pipenv run python banner_parser.py` to generate history banner list based on Fandom wiki (I hope they don't rework that...)
-- Execute `pipenv run python main.py` to generate Excel file for usage on the site, seems to work properly.
+- Execute `pipenv run python main.py` to generate the Excel file for use on the site (seems to work properly). The generated file is called `generated_history.xlsx`.
+- On paimon.moe, click on `Settings` at the top of the wish page and click on `Import from Excel`. Drag and drop or select the `generated_history.xlsx` file.
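
As a side note (not part of the patch series above): the A1 requirement that PATCH 3/5 addresses can be sanity-checked on the generated workbook with a few lines of openpyxl. This is a minimal illustrative sketch, assuming `generated_history.xlsx` has already been produced by `main.py` in the current directory.

# Illustrative sketch: confirm the header cell paimon.moe checks for (see PATCH 3/5).
from openpyxl import load_workbook

wb = load_workbook('generated_history.xlsx')  # file written by main.py
assert wb['Information']['A1'].value == 'Paimon.moe Wish History Export'
print('Information!A1 header is set correctly')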