1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
|
#!/usr/bin/env python3
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import bs4
import time
import feedgenerator
from datetime import datetime, timedelta
from fastapi import FastAPI, Response
# Module-level ASGI application object; an ASGI server (e.g. uvicorn) imports
# and serves this. The single route below is registered against it.
app = FastAPI()
@app.get("/")
async def root():
    """Serve an RSS 2.0 feed of the last seven days of Dilbert strips.

    For each of the past 7 dates, fetches the strip page on dilbert.com,
    extracts the Open Graph title/image and article:author meta tags, and
    adds an item to the feed. Returns the serialized feed as
    application/xml.

    Raises:
        RuntimeError: if a strip page keeps answering 503 after all retries.
    """
    feed = feedgenerator.Rss201rev2Feed(
        title="Dilbert Comic Strip",
        description="New 'bert",
        link="https://dilbert.techchud.xyz/",
    )

    # One session with connection-level retries, shared across all days.
    # The original rebuilt the session every iteration and then bypassed it
    # with bare requests.get() calls, so the Retry adapter never applied.
    session = requests.Session()
    retry = Retry(connect=3, backoff_factor=0.5)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)

    for day in range(7):
        date = (datetime.today() - timedelta(days=day)).strftime('%Y-%m-%d')
        url = "https://dilbert.com/strip/" + date

        # Application-level retry: the site intermittently answers 503 to
        # scrapers; back off one second between attempts. Connection-level
        # failures are retried by the adapter above.
        tries = 25
        for attempt in range(tries):
            search = session.get(url, timeout=10)
            if search.status_code != 503:
                break
            if attempt < tries - 1:
                time.sleep(1)
        else:
            # Original used a bare `raise` with no active exception, which
            # itself crashes with an unrelated RuntimeError; raise explicitly.
            raise RuntimeError(
                "Gave up after %d 503 responses for %s" % (tries, url)
            )

        soup = bs4.BeautifulSoup(search.text, features="lxml")
        title_tag = soup.find("meta", property="og:title")
        image_tag = soup.find("meta", property="og:image")
        author_tag = soup.find("meta", property="article:author")
        if not (title_tag and image_tag and author_tag):
            # Page layout changed or no strip for this date: skip the day
            # instead of crashing the whole feed with a TypeError on None.
            continue

        feed.add_item(
            title=title_tag["content"],
            description=image_tag["content"],
            author_name=author_tag["content"],
            link=url,
            # Original passed id=id (the *builtin* function), giving every
            # item the same meaningless guid; the strip URL is stable and
            # unique per item.
            id=url,
        )

    return Response(content=feed.writeString("utf-8"),
                    media_type="application/xml")
|