"""优化版海报爬取"""
import requests
from bs4 import BeautifulSoup
import os
import time, random
from datetime import datetime
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from requests.exceptions import HTTPError
import shutil
import logging
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
# logging.disable()
DEFAULT_POSTER_PATH = r"E:\Gazer\DoubanGaze\data\poster"
def compare_date(target_date_1, target_date_2, viewed_date):
    """Check whether an entry's marked date falls between the given dates, inclusive.

    Args:
        target_date_1 (str (YYYY-MM-DD)): start date
        target_date_2 (str (YYYY-MM-DD)): end date
        viewed_date (str (YYYY-MM-DD)): scraped viewing date

    Returns:
        bool: True if viewed_date lies within the range, False otherwise
    """
    try:
        target_date_1_obj = datetime.strptime(target_date_1, "%Y-%m-%d")
        target_date_2_obj = datetime.strptime(target_date_2, "%Y-%m-%d")
        viewed_date_obj = datetime.strptime(viewed_date, "%Y-%m-%d")
        return target_date_1_obj <= viewed_date_obj <= target_date_2_obj
    except ValueError as e:
        logging.error(f"Invalid date format: {e}")
        return False
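
# A minimal sanity sketch for compare_date (the dates below are made up for
# illustration). Note that both endpoints count as inside the range:
#   compare_date("2024-1-1", "2024-12-31", "2024-6-15")   # True: inside the range
#   compare_date("2024-1-1", "2024-12-31", "2024-12-31")  # True: bounds are inclusive
#   compare_date("2024-1-1", "2024-12-31", "2025-1-1")    # False: after the end date
#   compare_date("2024-1-1", "2024-12-31", "not-a-date")  # False: logged as a format error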
def crawl_link(target_link, session, headers):
    """Fetch the target link and return a soup object.

    Args:
        target_link (string): target URL
        session (requests.Session): requests.Session object
        headers (dict): request headers

    Returns:
        tuple[bs4.BeautifulSoup | None, int | None]: soup object and status code
    """
    status_code = None
    try:
        target_response = session.get(target_link, headers=headers)
        status_code = target_response.status_code
        target_response.raise_for_status()
        target_soup = BeautifulSoup(target_response.content.decode('utf-8'), 'html.parser')
        return target_soup, status_code
    except requests.exceptions.RequestException as e:
        logging.error(f"Request failed: {e}")
        return None, status_code
    except Exception as e:
        logging.error(f"Unexpected error: {e}")
        return None, status_code
def create_folder(target_date_1, target_date_2, poster_save_path=DEFAULT_POSTER_PATH):
    """Create a folder under the given directory for posters marked within the date range.

    The folder name is derived from the start and end dates, in the format
    {target_date_1}_{target_date_2}, e.g. 2024_12_1_2024_12_31.

    Args:
        target_date_1 (str (YYYY-MM-DD)): start date
        target_date_2 (str (YYYY-MM-DD)): end date
        poster_save_path (string): directory for saving poster images.
            Defaults to "E:\\Gazer\\DoubanGaze\\data\\poster".

    Returns:
        single_poster_save_path (string): folder path for this batch of posters,
            e.g. "E:\\Gazer\\DoubanGaze\\data\\poster\\2024_12_1_2024_12_31".
    """
    date_filename = f"{target_date_1.replace('-', '_')}_{target_date_2.replace('-', '_')}"
    single_poster_save_path = os.path.join(poster_save_path, date_filename)
    os.makedirs(single_poster_save_path, exist_ok=True)
    print(f"Posters will be saved in {single_poster_save_path} 📂")
    return single_poster_save_path
def save_poster(poster_src, viewed_date_text, single_poster_save_path, count, headers):
    """Save a single poster to disk.

    Args:
        poster_src (string): scraped poster download URL.
        viewed_date_text (string (YYYY-MM-DD)): the entry's marked date.
        single_poster_save_path (string): folder path for this batch of posters,
            as returned by create_folder.
        count: running index of the current poster.
        headers (dict): request headers.

    Returns:
        bool: True if the poster was saved, False otherwise.
    """
    retry_count = 3  # number of attempts
    for attempt in range(retry_count):
        try:
            img_response = requests.get(poster_src, stream=True, headers=headers)
            img_response.raise_for_status()
            # Build the image filename
            date_filename = viewed_date_text.replace("-", "_")
            img_filename = f"{date_filename}_{count}.jpg"  # include the running index
            img_path = os.path.join(single_poster_save_path, img_filename)
            # Save the image to disk
            with open(img_path, 'wb') as f:
                shutil.copyfileobj(img_response.raw, f)
            print(f"img saved {img_path} 🧩")
            return True
        except HTTPError as e:
            logging.error(f"HTTP error code: {e.response.status_code}, image download failed: {poster_src}")
            if e.response.status_code == 418 and attempt < retry_count - 1:
                time.sleep(random.randint(5, 10))
                logging.debug("U R A TEAPOT, retrying... ☕")
                continue
            else:
                logging.error(f"Request failed! Max retries reached, image download failed: {poster_src}")
                return False
        except requests.exceptions.RequestException as e:
            logging.error(f"Image download failed: {e}")
            return False
        except Exception as e:
            logging.error(f"Error while saving image: {e}")
            return False
    # Defensive: only reached if the loop somehow exits without returning.
    logging.error(f"Failed to save image, max retries reached: {poster_src}")
    return False
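
# Illustrative call (all values below are made up): save one poster into a
# batch folder. On success this writes 2024_12_31_1.jpg into the folder and
# returns True.
#   save_poster(
#       "https://img9.doubanio.com/view/photo/l/public/p1234567890.webp",  # hypothetical photo id
#       "2024-12-31",
#       r"E:\Gazer\DoubanGaze\data\poster\2024_1_1_2024_12_31",
#       1,
#       headers=get_headers("<your cookies>", "https://movie.douban.com/"),
#   )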
def get_movie_elements(soup):
    """Find and return all div elements containing movie entries.

    Args:
        soup (bs4.BeautifulSoup): soup object for a collection page

    Returns:
        bs4.element.ResultSet: list of div elements, one per movie entry
    """
    return soup.select("#content > div.grid-16-8.clearfix > div.article .item.comment-item")
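
# For orientation, the selector above targets grid-mode markup shaped roughly
# like the sketch below (simplified; the real Douban page carries more
# attributes and nesting, and the photo id here is invented):
#
#   <div class="item comment-item">
#       <div class="pic">
#           <img src="https://img1.doubanio.com/view/photo/s_ratio_poster/public/p1234567890.webp">
#       </div>
#       <div class="info">... <span class="date">2024-12-31</span> ...</div>
#   </div>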
def get_movie_info(movie_element):
    """Extract the viewing date and compressed poster link from a single movie entry div.

    Args:
        movie_element (bs4.element.Tag): a single movie entry div element

    Returns:
        tuple[str | None, str | None]:
            viewing-date text and compressed poster URL, or (None, None)
            if either element is missing
    """
    viewed_date_element = movie_element.select_one("div.info span.date")
    compressed_link_element = movie_element.select_one("div.pic img")
    if viewed_date_element and compressed_link_element:
        viewed_date_text = viewed_date_element.text.strip()
        compressed_link = compressed_link_element['src']
        return viewed_date_text, compressed_link
    return None, None
def get_headers(cookies, referer):
    """Build request headers carrying the login cookies and the given Referer."""
    return {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        "Cookie": cookies,
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "Referer": referer,
        # "Referer": "https://movie.douban.com/",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-User": "?1",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Site": "none",
    }
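
# The Referer appears to matter: save_poster retries on HTTP 418, and the main
# loop swaps the compressed poster link in as the Referer before downloading.
# Illustrative usage (the cookie string and user id are placeholders):
#   headers = get_headers("bid=...; dbcl2=...",
#                         "https://movie.douban.com/people/<user_id>/collect")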
def download_poster_images(cookies, target_date_1, target_date_2, poster_save_path, page_id=1):
    """
    Extract poster image links from the user's collection pages and download
    them into the target folder.

    Args:
        cookies: cookies carrying the login session
        target_date_1 (str (YYYY-MM-DD)): start date
        target_date_2 (str (YYYY-MM-DD)): end date
        poster_save_path (string): directory for saving poster images
        page_id: page number to start crawling from. Defaults to 1.

    Returns:
        None

    Raises:
        requests.exceptions.RequestException: if a page request fails
        Exception: any other error that may occur
    """
    start_time = time.perf_counter()
    single_poster_save_path = create_folder(target_date_1, target_date_2, poster_save_path)
    session = requests.Session()
    retry = Retry(connect=3, backoff_factor=0.5)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    first_page = True
    count = 0
    viewed_movie_elements = []  # defensive init so the bottom-of-loop check cannot hit a NameError
    while True:
        page_processed = (int(page_id) - 1) * 15  # each collection page lists 15 entries
        viewed_movie_url = f"https://movie.douban.com/people/133554124/collect?start={page_processed}&sort=time&rating=all&mode=grid&type=all&filter=all"
        headers = get_headers(cookies, viewed_movie_url)
        soup, status_code = crawl_link(viewed_movie_url, session, headers)
        if soup:
            print(f"NOW IN {viewed_movie_url} 🦎")
            try:
                # Print the response HTML for debugging
                # print(soup.prettify())
                # 1. Find all div elements containing movie entries
                viewed_movie_elements = get_movie_elements(soup)
                logging.debug(f"Found {len(viewed_movie_elements)} marks. FYI: 15.")
                all_items_not_match = True
                viewed_date_text = ""  # initialize
                # 2. Iterate over each movie entry div element
                for movie_element in viewed_movie_elements:
                    movie_start_time = time.perf_counter()  # start the per-entry timer
                    # 3. Get the viewing date and the compressed poster link
                    viewed_date_text, compressed_link = get_movie_info(movie_element)
                    if viewed_date_text:
                        logging.debug(f"Found date: {viewed_date_text}")
                        if compare_date(target_date_1, target_date_2, viewed_date_text):
                            all_items_not_match = False  # a matching entry was found; clear the flag
                            logging.debug(f"{all_items_not_match}, about to crawl...")
                            # 4. Build the full-size poster link
                            photo_id = compressed_link.split("/")[-1].split(".webp")[0][1:]
                            poster_link = f"https://img9.doubanio.com/view/photo/l/public/p{photo_id}.webp"
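                            # The two lines above rewrite the thumbnail URL into the
                            # full-size one; with a made-up photo id the mapping is:
                            #   .../view/photo/s_ratio_poster/public/p1234567890.webp
                            #     -> https://img9.doubanio.com/view/photo/l/public/p1234567890.webp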
                            # 5. Rebuild the headers, using compressed_link as the referer
                            headers = get_headers(cookies, compressed_link)
                            # 6. Add a delay (important!)
                            time.sleep(random.uniform(2, 6))  # random delay of 2-6 seconds
                            # 7. Save the poster via save_poster, passing in the headers
                            count += 1
                            save_poster(poster_link, viewed_date_text, single_poster_save_path, count, headers=headers)
                    movie_end_time = time.perf_counter()  # stop the per-entry timer
                    movie_elapsed_time = movie_end_time - movie_start_time
                    logging.debug(f"Time spent on this entry: {movie_elapsed_time:.2f} s ⏱️")
                # Loop for single page ended here.
                # Going to the next page.
                if all_items_not_match and not first_page:
                    print("Done. 😺")
                    break
                else:
                    first_page = False
                    page_id += 1
            except Exception as e:
                print(f"Error while crawling: {e}")
        else:
            logging.debug(f"Request failed! Status code: {status_code}")
            break  # stop instead of retrying the same page forever
        if not viewed_movie_elements:
            print("No more marks. 😺")
            break
    print(f"I have saved {count} posters for U. 😼")
    end_time = time.perf_counter()  # stop the overall timer
    elapsed_time = end_time - start_time
    minutes, seconds = divmod(int(elapsed_time), 60)
    print(f"Total time: {minutes} min {seconds} s 🏅")
if __name__ == "__main__":
    target_date_1 = "2024-1-1"    # TODO fill in the start date, e.g. 2025-1-1
    target_date_2 = "2024-12-31"  # TODO fill in the end date, e.g. 2025-1-31
    print(f"Crawling posters marked between {target_date_1} and {target_date_2}... 🤖")
    poster_save_path = r"E:\Gazer\DoubanGaze\data\poster"
    cookies = 'SWEETCOOKIE'
    download_poster_images(cookies, target_date_1, target_date_2, poster_save_path, page_id=1)  # TODO change the start page