#!/usr/bin/python3
import requests
import urllib.request
import sys
import os
import argparse
import time
import hashlib
import json

# Command-line interface
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('--source',
    default=False,
    action='store_true',
    dest='source',
    help='Links to the source code of the software'
)
parser.add_argument('--debug',
    default=False,
    action='store_true',
    dest='debug',
    help='Enables debug output'
)
parser.add_argument('--amount', '-a',
    default=0,
    dest='amount',
    action='store',
    help="the amount of funnies you'd like to download per tag",
    type=int
)
parser.add_argument('tags',
    nargs='+',
    type=str,
    help='Provides tags to be checked for funny downloading'
)
args = parser.parse_args()

if args.source:
    print("https://git.snootgame.xyz/PrincipalSpears/comedyGenerator")
    sys.exit(0)

# Browser-like headers used for the initial request to ifunny.co
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:91.0) Gecko/20100101 Firefox/91.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "DNT": "1",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "cross-site",
    "Cache-Control": "max-age=0"
}

videos = []

for tags in args.tags:
    if args.debug:
        print("Downloading Tag: " + tags)

    # Fetch the front page to pick up the CSRF token and session cookie,
    # then merge the response headers into our own request headers.
    master = requests.get('https://ifunny.co/', headers=headers)
    combineHeader = (dict(master.headers) | headers)
    requestHeader = {
        "User-Agent": combineHeader['User-Agent'],
        "Content-Type": combineHeader['Content-Type'],
        "x-requested-with": "fetch",
        "x-csrf-token": combineHeader['Set-Cookie'].split(';')[0].split('=')[1],
        "set-cookies": combineHeader['Set-Cookie'],
        "access-control-allow-headers": combineHeader['access-control-allow-headers']
    }
    requestCookies = {
        "CID": combineHeader['Set-Cookie'].split(';')[3].split('=')[2],
        "sound": "off",
        "viewMode": "list",
        "x-csrf-token": combineHeader['Set-Cookie'].split(';')[0].split('=')[1]
    }

    # Grab the first page of the video feed for this tag, backing off and
    # retrying if the request fails (e.g. when rate limited).
    for tries in range(100):
        try:
            tagPage = requests.get("https://ifunny.co/api/v1/feeds?filter=video&tag=" + tags,
                                   headers=requestHeader, cookies=requestCookies)
            if args.debug:
                print("Got Webpage!")
        except Exception:
            if tries < 100 - 1:
                print("Rate Limited! Sleeping for " + str(tries * 1.5) + " seconds!")
                time.sleep(tries * 1.5)
                continue
        break
    JSONDump = tagPage.json()

    # Keep paging through the feed until enough video URLs have been collected.
    while len(videos) < args.amount:
        print("Currently have " + str(len(videos)) + " videos out of " + str(args.amount)
              + " (" + str((len(videos) / args.amount) * 100) + "%)")
        for item in range(len(JSONDump['items'])):
            videos.append(JSONDump['items'][item]['url'])
        for tries in range(100):
            try:
                tagPage = requests.get("https://ifunny.co/api/v1/feeds?filter=video&tag=" + tags
                                       + "&next=" + JSONDump['pagination']['next'],
                                       headers=requestHeader, cookies=requestCookies)
                JSONDump = tagPage.json()
                if args.debug:
                    print("Got New Tag Page!")
                break
            except Exception:
                if tries < 100 - 1:
                    print("Rate Limited! Sleeping for " + str(tries * 1.5) + " seconds!")
                    time.sleep(tries * 1.5)
                    continue

    if len(videos) > args.amount:
        videos = videos[:args.amount]
        if args.debug:
            print("Videos list truncated! It's now: " + str(len(videos)) + " units long")

    # Download every collected video into ~/Videos/unsorted, naming each file
    # after its tag and the MD5 hash of its URL so repeat runs skip duplicates.
    os.makedirs('/home/' + os.environ['USER'] + '/Videos/unsorted/', exist_ok=True)
    for video in videos:
        if args.debug:
            print("Now running for " + str(video))
        if isinstance(video, str):
            if args.debug:
                print("URL read as: " + video)
            name = tags + "-" + hashlib.md5(video.encode('utf-8')).hexdigest() + ".mp4"
            path = '/home/' + os.environ['USER'] + '/Videos/unsorted/' + name
            if args.debug:
                print("name read as: " + name)
            if os.path.exists(path):
                print(name + " already exists!")
            else:
                print("saving " + video + " as " + name)
                for tries in range(100):
                    try:
                        urllib.request.urlretrieve(video, path)
                    except Exception:
                        if tries < 100 - 1:
                            print("Rate Limited! Sleeping for " + str(tries * 1.5) + " seconds!")
                            time.sleep(tries * 1.5)
                            continue
                    break
                #urllib.request.urlretrieve(video.get_attribute("data-src"), '/home/' + os.environ['USER'] + '/Videos/unsorted/' + name)
        elif args.debug:
            print("URL is NOT a string, it is a " + str(type(video)))

#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License version 3 as published by
#the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with this program. If not, see <https://www.gnu.org/licenses/>.