@@ -17,7 +17,7 @@
 headers = {"User-Agent": "ARBSMapDo V1"}

 # Used to avoid spamming beatsaver API
-beatsaver_scraped_data_url = "https://raw.githubusercontent.com/andruzzzhka/BeatSaberScrappedData/master/beatSaverScrappedData.zip"
+BEATSAVER_SCRAPED_DATA_URL = "https://github.com/andruzzzhka/BeatSaberScrappedData/raw/master/combinedScrappedData.zip"


 class Cache:
@@ -27,36 +27,38 @@ def __init__(self, arbsmapdo_config):
         self.tmp_dir = Path(arbsmapdo_config["tmp_dir"])
         self.tmp_dir.mkdir(exist_ok=True)
         self.download_dir = Path(arbsmapdo_config["download_dir"])
-        self.beatsaver_cachefile = Path(arbsmapdo_config["beatsaver_cachefile"])
-        self.levelhash_cachefile = Path(arbsmapdo_config["levelhash_cachefile"])
+        self.beatsaver_cachefile = Path(
+            arbsmapdo_config["beatsaver_cachefile"])
+        self.levelhash_cachefile = Path(
+            arbsmapdo_config["levelhash_cachefile"])

         self.rescan = arbsmapdo_config["rescan"]

-        self._beatsaver_cache, self.local_cache_last_downloaded = self.load_beatsaver_cache_from_andruzzzhka_scrapes()
+        # self.load_beatsaver_cache_from_andruzzzhka_scrapes()
+        self._beatsaver_cache, self.local_cache_last_downloaded = {}, {}

         self.levelhash_cache = self.load_levelhash_cache()
         self.update_levelhash_cache()

     def _update_andruzzzhka_scrapes(self):
         print("Updating Local BeatSaver Cache. This helps avoiding spamming the API hundreds of times.")
-        print("Downloading beatSaverScrappedData (helps to avoid spamming beatsaver API)...")
+        print(
+            "Downloading beatSaverScrappedData (helps to avoid spamming beatsaver API)...")

         self.beatsaver_cachefile.unlink(missing_ok=True)

         dl_filename = str(self.tmp_dir.joinpath("andruzzzhka_scrape.zip"))
-        wget.download(beatsaver_scraped_data_url, dl_filename)
+        wget.download(BEATSAVER_SCRAPED_DATA_URL, dl_filename)

         # Unzip
         try:
             with zipfile.ZipFile(str(dl_filename), "r") as zip_file:
                 zip_file.extractall(str(self.tmp_dir))
             # Replace old local cache by updated version
-            os.replace(self.tmp_dir.joinpath("beatSaverScrappedData.json"), self.beatsaver_cachefile)
+            os.replace(self.tmp_dir.joinpath(
+                "combinedScrappedData.json"), self.beatsaver_cachefile)
         except zipfile.BadZipFile as e:
             # Workaround for https://github.com/andruzzzhka/BeatSaberScrappedData/issues/6
-            print(f"Error when extracting zipfile:\n{e}\nDownloading uncompressed json instead (will be slower!)...")
-            wget.download("https://raw.githubusercontent.com/andruzzzhka/BeatSaberScrappedData/master/beatSaverScrappedData.json",
-                          out=str(self.beatsaver_cachefile))
+            print(f"Error when extracting zipfile:\n{e}")

-        last_updated = time.time()
         print("\nCache ready.")

     def load_beatsaver_cache_from_andruzzzhka_scrapes(self):
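Note on the constructor change above: with the call to load_beatsaver_cache_from_andruzzzhka_scrapes() commented out, _beatsaver_cache and local_cache_last_downloaded now start as empty dicts, so lookups can no longer be served from the scraped dump and fall through to the live BeatSaver API. A minimal sketch of the resulting flow, assuming get_beatsaver_info() consults the cache dict before calling the API, as its docstring further down suggests (the helper name below is hypothetical):

    def lookup_sketch(cache, level_hash):
        # With the scraped cache disabled, this .get() always misses at first,
        # so every level resolves through _get_beatsaver_info_by_api().
        cached = cache._beatsaver_cache.get(level_hash.lower())
        if cached is not None:
            return cached
        return cache._get_beatsaver_info_by_api(level_hash)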
@@ -70,8 +72,8 @@ def load_beatsaver_cache_from_andruzzzhka_scrapes(self):

         # Elapsed is given in seconds. The scrapes of andruzzzhka get updated once per day.
         if elapsed > 86400:
-            update = True
-
+            update = True
+
         # Update cache if neccessary
         if update:
             self._update_andruzzzhka_scrapes()
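The hunk above is a whitespace-only cleanup around the staleness check: 86400 seconds is 24 hours, matching the once-per-day update cadence of the andruzzzhka scrapes noted in the comment. How elapsed is computed is not shown in this commit; a minimal sketch, assuming the cache file's modification time is the reference point (hypothetical helper, not part of the commit):

    import time
    from pathlib import Path

    def cache_age_seconds(cachefile: Path) -> float:
        # Seconds since the cache file was last written.
        return time.time() - cachefile.stat().st_mtime

    # update = cache_age_seconds(self.beatsaver_cachefile) > 86400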
@@ -83,7 +85,7 @@ def load_beatsaver_cache_from_andruzzzhka_scrapes(self):

         cache_dict = dict()
         for levelinfo in scraped_cache_raw:
-            cache_dict[levelinfo["hash"].lower()] = levelinfo
+            cache_dict[levelinfo["Hash"].lower()] = levelinfo

         return cache_dict, last_modified

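The re-keying above tracks the new dump format: combinedScrappedData.json evidently capitalizes its field names, so the map hash is read from "Hash" instead of "hash", still lowercased so cache lookups stay case-insensitive. A hedged illustration of the resulting structure (only the "Hash" field is taken from this diff; the other fields in the dummy record are assumptions):

    scraped_cache_raw = [
        # Dummy record; "Hash" would be the map's 40-char SHA1 hex string.
        {"Hash": "ABCDEF0123456789ABCDEF0123456789ABCDEF01"},
    ]
    cache_dict = {levelinfo["Hash"].lower(): levelinfo
                  for levelinfo in scraped_cache_raw}
    # Lookups then use the lowercase SHA1 hex string of a level as the key.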
@@ -96,16 +98,18 @@ def _get_beatsaver_info_by_api(self, level_id):
         try:
             if len(level_id) == 40:
                 # Is sha1-hash
-                response = requests.get("https://beatsaver.com/api/maps/by-hash/{id}".format(id=level_id), headers=headers)
+                response = requests.get(
+                    "https://beatsaver.com/api/maps/hash/{id}".format(id=level_id), headers=headers)
             else:
                 # Treat as level key
-                response = requests.get("https://beatsaver.com/api/maps/detail/{id}".format(id=level_id), headers=headers)
+                response = requests.get(
+                    "https://beatsaver.com/api/maps/detail/{id}".format(id=level_id), headers=headers)
             json = response.json()
         except JSONDecodeError:
             print("Failed to get level {} from Beat Saver.".format(level_id))
             return None
         return json
-
+
     def get_beatsaver_info(self, level_id):
         """
         Uses information from the cache (hashes only) or calls the beatsaver API (hashes & keys)
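The endpoint change above (api/maps/by-hash to api/maps/hash) is the substantive part of this hunk; the detail lookup is merely re-wrapped. A standalone sketch of the updated hash lookup, using the URL and User-Agent header exactly as they appear in this diff (the function name is hypothetical; error handling trimmed):

    import requests

    headers = {"User-Agent": "ARBSMapDo V1"}

    def fetch_map_by_hash(level_hash):
        # level_hash: 40-character SHA1 hex string identifying the map.
        response = requests.get(
            "https://beatsaver.com/api/maps/hash/{id}".format(id=level_hash),
            headers=headers)
        return response.json()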
@@ -135,7 +139,7 @@ def load_levelhash_cache(self):
             return hashcache
         else:
             return dict()
-
+
     def save_levelhash_cache(self):
         # Save updates to the cachefile
         with open(self.levelhash_cachefile, "w+", encoding="UTF-8") as fp:
@@ -149,12 +153,10 @@ def update_levelhash_cache(self):
             # If this is not the case -> calculate the hash and store to hashcache
             if entry.name not in self.levelhash_cache.keys():
                 if entry.is_dir():
-                    levelhash = utils.calculate_Level_hash_from_dir(self.download_dir.joinpath(entry.name))
+                    levelhash = utils.calculate_Level_hash_from_dir(
+                        self.download_dir.joinpath(entry.name))
                     self.levelhash_cache[entry.name] = levelhash
                 if entry.is_file() and entry.suffix == ".zip":
-                    levelhash = utils.calculate_Level_hash_from_zip(self.download_dir.joinpath(entry.name))
+                    levelhash = utils.calculate_Level_hash_from_zip(
+                        self.download_dir.joinpath(entry.name))
                     self.levelhash_cache[entry.name] = levelhash
-
-
-
-
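The final hunk only re-wraps the two hash-calculation calls and trims trailing blank lines; the logic is unchanged: each directory or .zip in download_dir is hashed once and remembered in levelhash_cache under its entry name, so later scans skip re-hashing. For completeness, a rough sketch of the matching save step, assuming the cache is stored as plain JSON (the open(..., "w+", encoding="UTF-8") call appears in an earlier hunk; the json.dump itself is an assumption):

    import json

    def save_levelhash_cache_sketch(levelhash_cache, levelhash_cachefile):
        # Persist the name -> hash mapping; json.dump is assumed,
        # not shown in the commit.
        with open(levelhash_cachefile, "w+", encoding="UTF-8") as fp:
            json.dump(levelhash_cache, fp)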