 telegram.py |  68 ----
 twitter.py  | 193 ----
 webapp.py   |  22 ----
 3 files changed, 0 insertions(+), 283 deletions(-)
diff --git a/telegram.py b/telegram.py
deleted file mode 100755
index d95ce34..0000000
--- a/telegram.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python3
-
-from urllib.request import urlopen, Request
-from datetime import datetime
-from bs4 import BeautifulSoup
-import sys
-
-from rss import _format_date
-
-
-def telegram(channel):
-    url = "https://t.me/s/" + channel
-    res = urlopen(Request(url))
-    soup = BeautifulSoup(res, features="html.parser")
-
-    # messages = soup.find_all('div', attrs={'class': 'tgme_widget_message_wrap'})
-    messages = soup.find_all("div", attrs={"class": "tgme_widget_message_bubble"})
-
-    for message in messages:
-        date = message.find("time", attrs={"class": "time"})["datetime"]
-        html = message.find("div", attrs={"class": "tgme_widget_message_text"})
-        # preview = message.find('div', attrs={'class': 'tgme_widget_message_bubble'})
-        link = message.find("a", attrs={"class": "tgme_widget_message_date"})
-        title = html.text if html else "No text"
-        description = str(message)  # if preview else '?'
-        link = link["href"]
-        yield title, description, link, date
-
-
-def main(channel):
-    url = "https://t.me/s/" + channel
-
-    print(
-        """<?xml version="1.0" encoding="UTF-8"?>
-<rss version="2.0">
-  <channel>
-    <title>Telegram: """
-        + channel
-        + """</title>
-    <link>"""
-        + url
-        + """</link>
-    <description>The latest entries of the telegram channel of """
-        + channel
-        + """</description>
-    <lastBuildDate>"""
-        + _format_date(datetime.now())
-        + """</lastBuildDate>"""
-    )
-
-    for title, description, link, date in telegram(channel):
-        print("    <item>")
-        print("      <title><![CDATA[" + title + "]]></title>")
-        print("      <link>" + link + "</link>")
-        print("      <description><![CDATA[" + description + "]]></description>")
-        print("      <pubDate>" + date + "</pubDate>")
-        # print('      <media:content url="' + thumbnail + b'" type="image/jpeg" />')
-        print("    </item>")
-
-    print("  </channel>")
-    print("</rss>")
-
-
-if __name__ == "__main__":
-    if len(sys.argv) != 2:
-        print("Usage:", sys.argv[0], "<telegram channel>")
-        sys.exit(1)
-    main(sys.argv[1])
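
For reference, the deleted telegram.py scraped the public t.me/s/<channel> preview page with BeautifulSoup and turned each message bubble into an RSS item. A minimal sketch of that scraping approach (assumes beautifulsoup4 is installed; the channel name below is a placeholder):

    #!/usr/bin/env python3
    # Sketch of the approach used by the deleted telegram.py: fetch the
    # public preview page and yield (title, link) per message bubble.
    from urllib.request import urlopen, Request
    from bs4 import BeautifulSoup

    def latest_messages(channel):
        res = urlopen(Request("https://t.me/s/" + channel))
        soup = BeautifulSoup(res, features="html.parser")
        for msg in soup.find_all("div", attrs={"class": "tgme_widget_message_bubble"}):
            text = msg.find("div", attrs={"class": "tgme_widget_message_text"})
            link = msg.find("a", attrs={"class": "tgme_widget_message_date"})["href"]
            yield (text.text if text else "No text"), link

    if __name__ == "__main__":
        for title, link in latest_messages("durov"):  # placeholder channel
            print(link, "-", title[:72])
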
diff --git a/twitter.py b/twitter.py
deleted file mode 100755
index 673fce7..0000000
--- a/twitter.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python3
-
-from urllib.error import HTTPError
-from urllib.request import urlopen, Request
-import logging
-
-# from requests_oauthlib import OAuth1Session
-from datetime import datetime
-import sys
-import json
-
-bearer = None
-
-
-def getBearer():
-    global bearer
-    if bearer:
-        return bearer
-    headers = {
-        "Authorization": "Basic Zzl1MXI2SFpYTXg0SXU5UGs5VlNvTzFUdzpmeTIyQjN2QVRRNUI2eGthb1BFdFFRUmtuUGQ1WGZBbnBKVG5hc0ZRa3NyUm5qaVNsaw==",
-        "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
-    }
-    data = b"grant_type=client_credentials"
-    url = "https://api.twitter.com/oauth2/token"
-
-    res = urlopen(Request(url, headers=headers, data=data, method="POST"))
-    response = json.loads(res.read().decode("UTF-8"))
-    bearer = response["access_token"]
-
-    return bearer
-
-
-def unshorten_urls(title, description, urls):
-    for url in urls:
-        shorted_url = url["url"]
-        long_url = url["expanded_url"]
-
-        if "images" in url:
-            img = url["images"][0]["url"]
-            long_url_html = '<a href="' + long_url + '"><img src="' + img + '"/></a>'
-        else:
-            long_url_html = '<a href="' + long_url + '">' + long_url + "</a>"
-
-        description = description.replace(shorted_url, long_url_html)
-        title = title.replace(shorted_url, long_url)
-    return title, description
-
-
-def twitter(user):
-    # 500,000 tweets per month
-    # API KEY = g9u1r6HZXMx4Iu9Pk9VSoO1Tw
-    # API SECRET KEY = fy22B3vATQ5B6xkaoPEtQQRknPd5XfAnpJTnasFQksrRnjiSlk
-
-    headers = {"authorization": "Bearer " + getBearer()}
-
-    # Recent = last 7 days
-    url = (
-        "https://api.twitter.com/2/tweets/search/recent?query=from:"
-        + user
-        + "&tweet.fields=created_at,author_id,lang,source,public_metrics,entities&expansions=referenced_tweets.id,attachments.media_keys&media.fields=url"
-    )
-
-    try:
-        res = urlopen(Request(url, headers=headers))
-        response = json.loads(res.read().decode("UTF-8"))
-    except Exception as exc:
-        logging.error("Request to twitter failed.", exc_info=exc)
-        return None
-
-    feed = {
-        "title": "Twitter: " + user,
-        "url": "https://twitter.com/" + user,
-        "description": "The latest entries of the twitter account of " + user,
-        "content": [],
-    }
-
-    if not response["meta"]["result_count"]:
-        return feed
-
-    feed["content"] = [
-        parse_tweet(
-            user,
-            tweet,
-            response.get("includes", {}).get("tweets", []),
-            response.get("includes", {}).get("media", []),
-            headers,
-        )
-        for tweet in response["data"]
-    ]
-
-    return feed
-
-
-def parse_tweet(user, tweet, included_tweets, included_media, headers):
-    title = description = tweet["text"]
-    link = "https://twitter.com/" + user + "/status/" + str(tweet["id"])
-
-    # Check included re-tweets / replace by Retweet
-    ref_enclosures = []
-    for rt in tweet.get("referenced_tweets", []):
-        if rt["type"] == "retweeted":
-            rt_info = title[: title.index(":") + 2]
-            ref_tweet = next(t for t in included_tweets if t["id"] == rt["id"])
-            title = rt_info + ref_tweet["text"]
-            description = rt_info + ref_tweet["text"]
-            title, description = unshorten_urls(
-                title, description, ref_tweet.get("entities", {}).get("urls", [])
-            )
-        elif rt["type"] == "replied_to":
-            description += "<br/>This was a reply to: " + rt["id"]
-            text, enclosures = fetch_single_tweet(rt["id"], headers)
-            description += text
-            ref_enclosures.extend(enclosures)
-        elif rt["type"] == "quoted":
-            description += "<br/>Quoted tweet: " + rt["id"]
-            text, enclosures = fetch_single_tweet(rt["id"], headers)
-            description += text
-            ref_enclosures.extend(enclosures)
-        else:
-            description += f"<br/><br/>Unknown reference type: {rt['type']}"
-
-    title, description = unshorten_urls(
-        title, description, tweet.get("entities", {}).get("urls", [])
-    )
-
-    # Attach media
-    enclosures = []
-    included_media_keys = tweet.get("attachments", {}).get("media_keys", [])
-    for included_media_key in included_media_keys:
-        ref_media = next(
-            t for t in included_media if t["media_key"] == included_media_key
-        )
-        if "url" not in ref_media:
-            continue
-        if ref_media.get("type", "") == "photo":
-            description += '<br/><img src="' + ref_media["url"] + '" />'
-        else:
-            enclosures.append(ref_media["url"])
-    enclosures.extend(ref_enclosures)
-
-    # Append Retweets etc
-    description += "<br/><br/>"
-    description += str(tweet["public_metrics"]["retweet_count"]) + " Retweets, "
-    description += str(tweet["public_metrics"]["like_count"]) + " Likes, "
-    description += str(tweet["public_metrics"]["reply_count"]) + " Replies, "
-    description += str(tweet["public_metrics"]["quote_count"]) + " Quotes"
-    description += "<br/>"
-    description += "Source: " + tweet["source"]
-
-    date = datetime.strptime(tweet["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ")
-
-    return {
-        "title": title,
-        "url": link,
-        "content": description,
-        "date": date,
-        "enclosures": enclosures,
-    }
-
-
-def fetch_single_tweet(tweet_id, headers):
-    url = f"https://api.twitter.com/2/tweets/{tweet_id}?tweet.fields=entities&expansions=attachments.media_keys&media.fields=url"
-    try:
-        res = urlopen(Request(url, headers=headers))
-        response = json.loads(res.read().decode("UTF-8"))
-    except Exception as exc:
-        logging.error("Request to twitter failed (single tweet).", exc_info=exc)
-        return "", []  # callers unpack two values; never return None here
-
-    text = response["data"].get("text", "no text")
-
-    enclosures = []
-    for media in response.get("includes", {}).get("media", []):  # "includes" sits beside "data", not inside it
-        if "url" not in media:
-            continue
-        if media.get("type", "") == "photo":
-            text += '<br/><img src="' + media["url"] + '" />'
-        else:
-            enclosures.append(media["url"])
-
-    return text, enclosures
-
-def main(channel):
-    print(twitter(channel))
-
-
-if __name__ == "__main__":
-    if len(sys.argv) != 2:
-        print("Usage:", sys.argv[0], "<twitter channel>")
-        sys.exit(1)
-    main(sys.argv[1])
-    # twitter('rheinbahn_intim')
-    # twitter('realDonaldTrump')
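
The deleted getBearer() above implements Twitter's app-only OAuth2 flow: the API key and secret are joined with a colon, Base64-encoded into a Basic Authorization header, and exchanged for a bearer token at the oauth2/token endpoint. A standalone sketch of that exchange (fetch_bearer and its arguments are placeholders, not working credentials):

    import base64
    import json
    from urllib.request import urlopen, Request

    def fetch_bearer(api_key, api_secret):
        # Client-credentials grant, as in the deleted getBearer():
        # "key:secret" is Base64-encoded into a Basic auth header.
        basic = base64.b64encode((api_key + ":" + api_secret).encode()).decode()
        req = Request(
            "https://api.twitter.com/oauth2/token",
            headers={
                "Authorization": "Basic " + basic,
                "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
            },
            data=b"grant_type=client_credentials",
            method="POST",
        )
        return json.loads(urlopen(req).read().decode("UTF-8"))["access_token"]
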
diff --git a/webapp.py b/webapp.py
index 3c213e4..7022e08 100755
--- a/webapp.py
+++ b/webapp.py
@@ -13,10 +13,7 @@ from collections import OrderedDict
 from concurrent.futures import ThreadPoolExecutor
 
 from flask import Flask, Response
-
-# from telegram import telegram
 from rss import buildRSS
-from twitter import twitter
 from zdf import zdf
 
 app = Flask(__name__)
@@ -40,25 +37,6 @@ def not_found(e):
     return "Die angeforderte Seite konnte nicht gefunden werden."
 
 
-@app.route("/twitter/<account>")
-def feedTwitter(account):
-    return rssResponse(twitter(account))
-
-
-# @app.route("/telegram/<account>")
-# def feedTelegram(account):
-#     content = [{'title': t, 'url': u, 'content': c, 'date': d}
-#                for t,c,u,d in telegram(account)]
-#     xml = buildRSS(
-#         title = 'Telegram: ' + account,
-#         url = 'https://t.me/s/' + account,
-#         description = 'The latest entries of the telegram channel of ' + account,
-#         content = content)
-#     response = Response(xml, mimetype='text/xml')
-#     response.headers['Access-Control-Allow-Origin'] = '*'
-#     return response
-
-
 @app.route("/zdf/<path:feed>")
 def filterZDFFeed(feed):
     return rssResponse(zdf(feed))
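
Both removed routes shared the response pattern still visible in the commented-out telegram handler above: serialize the feed to XML and attach a permissive CORS header. A minimal self-contained sketch of that pattern (the route path and the stub payload are illustrative; the real app builds its XML with buildRSS):

    from flask import Flask, Response

    app = Flask(__name__)

    @app.route("/example/<account>")
    def example_feed(account):
        # Stub payload standing in for buildRSS(...) output.
        xml = '<rss version="2.0"><channel><title>' + account + "</title></channel></rss>"
        response = Response(xml, mimetype="text/xml")
        response.headers["Access-Control-Allow-Origin"] = "*"
        return response
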