Initial
This commit is contained in:
33
python/bulk_renamer.py
Executable file
33
python/bulk_renamer.py
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# bulk file renamer that you interface with using intermediary text file.
|
||||
|
||||
import os
|
||||
|
||||
def setup_file():
    """Write the name of every entry in the current directory to files.txt,
    one name per line, in os.listdir() order (rename_files() relies on
    reading them back in the same order).
    """
    # 'with' guarantees the handle is flushed and closed
    # (the original leaked the open file object)
    with open('files.txt', 'w') as files:
        # note: files.txt is created by the open() above, so it appears
        # in this listing too -- same behavior as the original
        for entry in os.listdir('.'):
            files.write(entry + '\n')
|
||||
|
||||
def rename_files():
    """Rename entries in the current directory according to files.txt.

    Line i of files.txt is the new name for the i-th entry returned by
    os.listdir('.').  Entries whose name is unchanged are skipped, and the
    mapping file itself is never renamed (either side of the mapping), so
    it cannot be clobbered mid-run.

    NOTE(review): this relies on os.listdir() returning the same order it
    did when setup_file() wrote files.txt -- true in practice when the
    directory is unmodified in between, but not guaranteed by the OS.
    """
    # 'with' closes the mapping file even if a rename raises
    # (the original leaked the open file object)
    with open('files.txt', 'r') as new_names:
        for current in os.listdir('.'):
            new_name = new_names.readline().rstrip('\n')

            # skip unchanged entries; never touch the mapping file itself
            if new_name == current or new_name == "files.txt" or current == "files.txt":
                continue

            os.rename(current, new_name)
|
||||
|
||||
# --- script flow: write the mapping file, wait for the user to edit it,
#     then apply the renames ---
print("bulk renamer by Olari.\n")

print("Generating files.txt")
print("Modify it to rename files\n")
setup_file()

# blocks here while the user edits files.txt in another window
input("Waiting for input...\n")

print("Renaming files")
rename_files()
|
||||
26
python/discord_spammer.py
Executable file
26
python/discord_spammer.py
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env python3

# deadsimple discord bot

import discord
import asyncio

client = discord.Client()

async def task():
    """Background loop: post a fixed message to one channel every minute."""
    await client.wait_until_ready()

    # NOTE(review): on old discord.py (0.x) is_closed is an attribute and
    # send_message/get_channel(str) exist; on modern discord.py all three
    # differ -- confirm the installed version before running
    while not client.is_closed:
        server = client.get_channel('channel id')  # placeholder channel id
        await client.send_message(server, 'message')  # placeholder message
        await asyncio.sleep(60) # time to sleep

@client.event
async def on_ready():
    # connection callback: log the bot's identity once logged in
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')

client.loop.create_task(task())
# NOTE(review): email/password login only worked on very old discord.py;
# current versions take a single bot token: client.run(token)
client.run('username', 'password')
|
||||
257
python/encode_touhou_lossless.py
Executable file
257
python/encode_touhou_lossless.py
Executable file
@@ -0,0 +1,257 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# overly complicated flac reencoder originally written for the big touhou music torrent on nyaa.si
|
||||
# contains example of process calling and multithreading
|
||||
# not recommended to use (probably destructive)
|
||||
|
||||
import os, re, time
|
||||
import subprocess
|
||||
import wave
|
||||
|
||||
from queue import Queue
|
||||
from threading import Thread
|
||||
|
||||
# shared error log, written to by split() below
f = open("errlog", "w", encoding="UTF-8")

def remove_if_exists(filename):
    """Delete *filename* if it exists; do nothing otherwise."""
    if not os.path.exists(filename):
        return
    os.remove(filename)
|
||||
|
||||
def opus_enc(queue, split_track_filename, track, quality=140.0):
    """Encode one split .wav to .opus via the external opusenc binary,
    tagging the output from *track*'s metadata.

    *queue* is the bounded queue used as a concurrency semaphore: one slot
    was put() before this process was started, and it is released (get)
    once encoding finishes.  *quality* is the opus VBR bitrate in kbit/s.
    *track* is presumably a Track instance from this file -- it must carry
    performer/index/title/cd_date/cd_genre/cd_title attributes.
    """
    subprocess.call([
        'opusenc',
        '--vbr',
        '--bitrate', str(quality),
        #'--comp', 10, #default
        #'--framesize', '60', # default 20
        '--artist', track.performer,
        '--comment', 'tracknumber={}'.format(track.index),
        '--title', track.title,
        '--date', track.cd_date,
        '--genre', track.cd_genre,
        '--album', track.cd_title,
        split_track_filename,
        # output file: same path with the extension swapped to .opus
        '{}.opus'.format(os.path.splitext(split_track_filename)[0]),
    ])
    # release our semaphore slot so the spawner can start another encoder
    queue.get()
|
||||
|
||||
class Track():
    """One TRACK entry from a cue sheet.

    Disc-level metadata is copied from the owning CueSheet at construction
    time; the title and index times are filled in later as the sheet is
    parsed.
    """

    def __init__(self, track_index, filename, parent):
        # copy the disc-wide tags from the owning cue sheet
        for attr in ('cd_performer', 'cd_title', 'cd_date', 'cd_genre'):
            setattr(self, attr, getattr(parent, attr))

        self.filename = filename
        # directory part of the path, trailing backslash included
        self.filepath = filename[:filename.rfind('\\') + 1]
        self.title = ''
        self.index = track_index
        # per-track performer defaults to the disc performer until overridden
        self.performer = self.cd_performer
        # INDEX number -> offset in seconds; INDEX 01 (track start) starts at 0.0
        self.time = {1: 0.0}

    def __str__(self):
        return f"{self.index} - {self.title} - {self.time}"
|
||||
|
||||
class CueSheet():
    """Parsed representation of one .cue sheet, plus the splitting pipeline.

    read() fills in disc/track metadata from the sheet; split() decodes the
    referenced .tta image to .wav, cuts each track out and hands it to
    opus_enc() in a worker process.  Destructive: source files are deleted
    as they are consumed.

    Bug fixes vs the original: ``multiprocessing`` was used but never
    imported, and split() referenced a global ``sheet`` instead of ``self``.
    """

    def __init__(self, filename):
        self.filename = filename
        # directory part of the sheet path, trailing backslash included
        self.filepath = filename[:filename.rfind('\\')+1]

        self.cd_performer = ''
        self.cd_title = ''
        self.cd_genre = ''
        self.cd_date = ''

        # FILE that subsequent TRACK entries refer to
        self.current_file = ''

        self.tracks = []

        # (pattern, handler) pairs; every sheet line is tried against each
        self.regex_lst = (
            (re.compile(r'PERFORMER\s(.+)'), self.__performer),
            (re.compile(r'REM DATE\s(.+)'), self.__date),
            (re.compile(r'REM GENRE\s(.+)'), self.__genre),
            (re.compile(r'TITLE\s(.+)'), self.__title),
            (re.compile(r'FILE\s(.+)\sWAVE'), self.__file),
            (re.compile(r'TRACK\s(\d{2})\sAUDIO'), self.__track),
            (re.compile(r'INDEX\s(\d{2})\s(\d{1,3}:\d{2}:\d{2})'), self.__index),
        )

    def __str__(self):
        value = "Title: {}\nPerformer: {}\nGenre: {}\nDate: {}\n".format(self.cd_title, self.cd_performer, self.cd_genre, self.cd_date)
        for track in self.tracks:
            value += ' ' + str(track) + '\n'
        return value

    def read(self):
        """Populate metadata by running each sheet line through regex_lst."""
        # utf-8-sig transparently drops a BOM if present
        with open(self.filename, 'r', encoding='utf-8-sig') as f:
            for line in f:
                for regex, handler in self.regex_lst:
                    mobj = regex.match(line.strip())
                    if mobj:
                        handler(*self.unquote(mobj.groups()))

    def split(self):
        """Cut every track out of its disc image and encode it to opus.

        A bounded multiprocessing.Queue acts as a semaphore so at most
        cpu_count() encoder processes run at once.  Fatal errors are logged
        to the module-level errlog handle ``f`` and abort this sheet.
        """
        import multiprocessing  # was missing entirely in the original

        encoding_queue = multiprocessing.Queue(multiprocessing.cpu_count())

        cds = set()     # disc-image .wav files to delete afterwards
        tracks = set()  # per-track .wav files to delete afterwards

        for i, track in enumerate(self.tracks):
            # FATAL: sheet is not for .tta file
            if track.filename[-4:] != '.tta':
                f.write("\nFilename isn't .tta ({}):\n{}\n".format(track.filename, str(self)))
                return

            # build "<NN> - <title>" with filesystem-hostile characters removed
            track_path = track.filepath + ' - '.join((track.index, track.title)).replace('?', '').replace('\\', '').replace('\\', '').replace(':', '')

            track_opus = track_path + '.opus'
            track_wav = track_path + '.wav'

            if os.path.exists(track_opus):
                f.write("File already exists, continuing... ({})".format(track_opus))
                remove_if_exists(track_wav)
                continue

            cd_wav = track.filename[:-4] + '.wav'

            # decode .tta if needed
            if not os.path.exists(cd_wav):
                # FATAL: no file to decode
                if not os.path.exists(track.filename):
                    f.write("\nFile doesn't exist ({}):\n{}\n".format(track.filename, str(self)))
                    return

                result = subprocess.call([
                    'tta', #'ttaenc',
                    '-d',
                    track.filename,
                    #'-o',
                    cd_wav
                ])

                # FATAL: .tta decode failed
                if result != 0:
                    f.write("Failed to decode .tta ({}):\n{}\n\n".format(track.index, str(self)))
                    return

                # remove .tta (destructive)
                remove_if_exists(track.filename)

            # split .wav into track
            if not os.path.exists(track_wav):
                wafi = wave.open(cd_wav, 'rb')
                param_names = ('nchannels', 'sampwidth', 'framerate', 'nframes', 'comptype', 'compname')
                params = wafi.getparams()
                param_dict = dict(zip(param_names, params))

                start = int(param_dict['framerate'] * track.time[1])
                stop = param_dict['nframes']
                # if the next track lives in the same image, stop at its start
                # (its INDEX 00 pregap, if present, else its INDEX 01).
                # bug fix: these two lines referenced a global `sheet`
                if len(self.tracks) > i+1 and self.tracks[i+1].filename == track.filename:
                    stop = int(param_dict['framerate'] * self.tracks[i+1].time.get(0, self.tracks[i+1].time[1]))

                wafi_write = wave.open(track_wav, 'wb')
                newparams = list(params)
                newparams[3] = 0  # nframes: recomputed by wave on close
                wafi_write.setparams( tuple(newparams) )

                wafi.setpos(start)
                wafi_write.writeframes(wafi.readframes(stop-start))
                wafi_write.close()

                wafi.close()

            # blocks here when the queue is full, i.e. all worker slots busy
            encoding_queue.put(track_wav)
            p = multiprocessing.Process(
                target=opus_enc,
                args=(
                    encoding_queue,
                    track_wav,
                    track
                )
            )

            p.start()

            if cd_wav not in cds:
                cds.add(cd_wav)

            tracks.add(track_wav)

        # wait for outstanding encoders, then clean up all intermediates
        while not encoding_queue.empty():
            time.sleep(0.2)

        for cd in cds:
            remove_if_exists(cd)

        for track in tracks:
            remove_if_exists(track)

        remove_if_exists(self.filename)

        print(self.filename, "done!")

    # --- regex handlers; before the first TRACK line, PERFORMER/TITLE are
    #     disc-level, afterwards they apply to the latest track ---

    def __performer(self, s):
        if not self.tracks:
            self.cd_performer = s
        else:
            self.tracks[-1].performer = s

    def __title(self, s):
        if not self.tracks:
            self.cd_title = s
        else:
            self.tracks[-1].title = s

    def __genre(self, s):
        self.cd_genre = s

    def __date(self, s):
        self.cd_date = s

    def __file(self, s):
        self.current_file = s

    def __track(self, s):
        self.tracks.append( Track(s, self.filepath + self.current_file, self) )

    def __index(self, idx, s):
        idx = int(idx)
        self.tracks[-1].time[idx] = self.index_split(s)

    @staticmethod
    def index_split(s):
        """Convert a cue 'mm:ss:ff' index (75 frames per second) to seconds."""
        t = s.split(':')
        return float(t[0])*60 + float(t[1]) + float(t[2]) / 75.0

    @staticmethod
    def dqstrip(s):
        """Strip one pair of surrounding double quotes, if present."""
        if s[0] == '"' and s[-1] == '"': return s[1:-1]
        return s

    @staticmethod
    def unquote(t):
        """Whitespace-strip and dqstrip every element of tuple *t*."""
        return tuple([CueSheet.dqstrip(s.strip()) for s in t])
|
||||
|
||||
class SplitterWorker(Thread):
    """Worker thread: parse one cue sheet, then split/encode its audio."""

    def __init__(self, queue, filename):
        super().__init__()
        self.queue = queue        # shared queue handed in by the spawner
        self.filename = filename  # path of the .cue sheet to process

    def run(self):
        """Thread entry point."""
        cue = CueSheet(self.filename)
        cue.read()
        cue.split()
|
||||
|
||||
if __name__ == '__main__':
    # NOTE(review): passed to every worker but never consumed anywhere visible
    queue = Queue()

    # spawn one daemon thread per .cue sheet found under the current directory
    for root, dirs, files in os.walk('.'):
        for name in files:
            if name[-4:].lower() == '.cue':
                worker = SplitterWorker(queue, root + '\\' + name)
                worker.daemon = True
                worker.start()

        # escape hatch: touch a file named 'stop' to abort
        # NOTE(review): original indentation was lost in export -- this check
        # may have been nested inside the inner loop; also, daemon threads
        # are killed as soon as the main thread exits
        if os.path.exists('./stop'):
            exit(1)
|
||||
16
python/extract_cover_flac.py
Executable file
16
python/extract_cover_flac.py
Executable file
@@ -0,0 +1,16 @@
|
||||
#!/usr/bin/env python3

# extracts cover from flac audio file

from mutagen.flac import FLAC, Picture

# hard-coded input file; the front cover is written next to it as cover.jpg
song = "cover.flac"

var = FLAC(song)
pics = var.pictures
print (pics)
for p in pics:
    # picture type 3 is "Cover (front)" in the FLAC picture-type table
    if p.type == 3: #front cover
        print("\nfound front cover")
        with open("cover.jpg", "wb") as f:
            # NOTE(review): data is written as-is -- it is only a JPEG if
            # the embedded picture actually was one; the extension is assumed
            f.write(p.data)
|
||||
45
python/ftb_continuum_oregen_plotter.py
Executable file
45
python/ftb_continuum_oregen_plotter.py
Executable file
@@ -0,0 +1,45 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
class Ore:
    """A named ore and the Y-level band it generates in."""

    def __init__(self, name, low, high):
        # low/high are the minimum and maximum generation Y-levels
        self.name, self.low, self.high = name, low, high
|
||||
|
||||
# (name, lowest spawn Y, highest spawn Y) for every ore in the modpack
oregen = (
    Ore('gold', 0, 32),
    Ore('iron', 0, 64),
    Ore('coal', 0, 128),
    Ore('lapis', 0, 32),
    Ore('diamond', 0, 16),
    Ore('redstone', 0, 16),
    Ore('emerald', 0, 16),
    Ore('certuz', 24, 48),
    Ore('apatite', 54, 96),
    Ore('uranium', 16, 42),
    Ore('ruby', 16, 42),
    Ore('sapphire', 16, 42),
    Ore('bauxite', 48, 72),
    Ore('tungsten', 0, 10),
    Ore('peridot', 16, 42),
    Ore('copper', 40, 75),
    Ore('tin', 20, 55),
    Ore('silver', 5, 30),
    Ore('lead', 5, 30),
    Ore('aluminum', 48, 72),
    Ore('nickel', 5, 20),
    Ore('platinum', 7, 75),
    Ore('iridium', 8, 75),
)

# order the bars by the depth each ore starts generating at
oregen = sorted(oregen, key=lambda x: x.low, reverse=True)

# each "box" is just the ore's (high, low) pair rendered as a horizontal span
plt.boxplot([(x.high, x.low) for x in oregen], labels=[x.name.title() for x in oregen], vert=False)
plt.title('FTB Continuum Oregen')
plt.xlabel('Y-level')
plt.xticks()  # NOTE(review): no-op without arguments -- confirm intent

plt.savefig('oregen.svg', format='svg')
|
||||
14
python/idatocstylesigs.py
Executable file
14
python/idatocstylesigs.py
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env python3

# converts code signatures found in ida to ones easily usable in c++ code

import sys


def convert(sig_bytes):
    """Turn a list of IDA-style signature bytes (e.g. ['55', '8B', '?'])
    into the C-style '"\\x55\\x8B\\x00", "xx?"' pattern/mask pair.

    Wildcard bytes ('?') become \\x00 in the pattern and '?' in the mask;
    every other byte becomes \\xNN in the pattern and 'x' in the mask.
    """
    pattern = ''.join('\\x' + (byte if byte != '?' else '00') for byte in sig_bytes)
    mask = ''.join('x' if byte != '?' else '?' for byte in sig_bytes)
    return '"' + pattern + '", "' + mask + '"'


# main guard added: the original ran (and could sys.exit) on import
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Usage: " + sys.argv[0] + " [ida style sig]")
        exit(1)

    print(convert(sys.argv[1:]))
|
||||
45
python/text/mal_list_render.py
Executable file
45
python/text/mal_list_render.py
Executable file
@@ -0,0 +1,45 @@
|
||||
#!/usr/bin/env python3

import bs4
import sys  # NOTE(review): sys and os are imported but never used
import os
import glob

# converts MyAnimeList's XML exports to readable (but less informative) text files.

animelists = glob.glob('animelist*.xml')

for animelist in animelists:
    with open(animelist, 'r') as xml, open(animelist + '.txt', 'w') as file:
        soup = bs4.BeautifulSoup(xml.read(), 'html.parser')

        # bucket every entry: movies first by type, then tv by watch status
        completed = []
        ptw = []
        movies = []
        for anime in soup.select('anime'):
            # one "Title watched/total" line per entry
            line = anime.select('series_title')[0].text + ' ' + anime.select('my_watched_episodes')[0].text + '/' + anime.select('series_episodes')[0].text + '\n'

            series_type = anime.select('series_type')[0].text
            status = anime.select('my_status')[0].text

            if series_type == 'Movie':
                movies.append(line)
                continue

            # NOTE(review): entries that are neither Completed nor
            # Plan to Watch (watching, dropped, on hold) are silently dropped
            if status == 'Completed':
                completed.append(line)
            elif status == 'Plan to Watch':
                ptw.append(line)

        # output order: completed, blank line, plan-to-watch, blank line, movies
        for title in completed:
            file.write(title)

        file.write('\n')

        for title in ptw:
            file.write(title)

        file.write('\n')

        for title in movies:
            file.write(title)
|
||||
14
python/text/youtube_subscriptions_render.py
Executable file
14
python/text/youtube_subscriptions_render.py
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env python3

# converts youtube subscriptions export .xml to simple text file

# channel ids are fixed-length; measure a known id instead of hard-coding 24
_CHANNEL_ID_LEN = len('UC3Y4vKAzTCqSdOt0ZeYWvTg')


def extract_channel_urls(path='subscription_manager'):
    """Return a channel URL for every 'channel_id=' occurrence in *path*.

    Scans the export line by line; each line carrying a channel_id=
    attribute yields one https://www.youtube.com/channel/... URL.

    Bug fix: the original looped ``while f:`` -- a file object is always
    truthy, so the script never terminated at EOF.
    """
    urls = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            if 'channel_id' not in line:
                continue

            # the id starts right after 'channel_id=' and has a fixed length
            idpos = line.find('channel_id=') + len('channel_id=')
            urls.append('https://www.youtube.com/channel/'
                        + line[idpos:idpos + _CHANNEL_ID_LEN])
    return urls


if __name__ == '__main__':
    for url in extract_channel_urls():
        print(url)
|
||||
23
python/web/gogdb_scaper.py
Executable file
23
python/web/gogdb_scaper.py
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/usr/bin/env python3

# scrapes gogdb.org's product listing and writes every game title to titles.txt

import requests # http requests
import bs4 # html parser

with open("titles.txt", "w", encoding="UTF-8") as file:
    # NOTE(review): page count is hard-coded -- adjust when the catalog grows
    for index in range(1, 175):
        url = "https://www.gogdb.org/products?page=" + str(index)
        print(url)

        # NOTE(review): rebuilds the url instead of reusing `url` above
        page = requests.get("https://www.gogdb.org/products?page=" + str(index))
        page.raise_for_status()

        soup = bs4.BeautifulSoup(page.text, "html.parser")

        producttable = soup.select("#product-table")[0]
        titles = producttable.select("tr")
        for title in titles:
            # header/spacer rows carry no type column
            if len(title.select(".col-type")) == 0:
                continue

            # keep only rows typed as games (skips dlc, packs, ...)
            if title.select(".col-type")[0].text == 'Game':
                file.write(title.select(".col-name")[0].text.strip() + '\n')
|
||||
34
python/web/mal_top_fetcher.py
Executable file
34
python/web/mal_top_fetcher.py
Executable file
@@ -0,0 +1,34 @@
|
||||
# fetches the top-ranked tv anime and movie titles from MyAnimeList
import requests, bs4, time  # NOTE(review): `time` is never used


def get_titles(filename, title_type, maxrank):
    """Write the top *maxrank* MAL titles of *title_type* into *filename*,
    one per line, paging through topanime.php 50 entries at a time.
    """
    with open(filename, "w", encoding="UTF-8") as file:
        limit = 0    # pagination offset for the ranking endpoint
        written = 0  # titles written so far

        while True:
            # custom user-agent: MAL blocks the default requests UA
            page = requests.get("https://myanimelist.net/topanime.php?type=" + title_type + "&limit=" + str(limit), headers = {'User-agent': 'stopblockingmyscriptlol'})
            page.raise_for_status()

            soup = bs4.BeautifulSoup(page.text, "html.parser")

            # ranking rows are the anchors carrying a rel attribute
            titles = soup.select("a[rel]")

            for title in titles:
                # skip rank-number anchors and the login link
                # NOTE(review): the len(title.text) == 2 heuristic looks
                # fragile against markup changes -- verify
                if len(title.text) == 2 or title.text == "Login":
                    continue

                file.write(title.text.strip() + '\n')
                written += 1

                print(str(written), title.text.strip())

                if written >= maxrank:
                    break

            # repeated check so the outer while loop ends too
            if written >= maxrank:
                break

            limit += 50

get_titles("rating.txt", "tv", 1750)
get_titles("movies.txt", "movie", 300)
|
||||
137
python/web/pwned_checker.py
Executable file
137
python/web/pwned_checker.py
Executable file
@@ -0,0 +1,137 @@
|
||||
#!/usr/bin/env python3

# reads SteamIDs from ./accounts.txt and outputs ban information into ./output.html

import urllib.request
import json
import time

# NOTE(review): must be filled in -- an empty key makes both API calls fail
steamapikey = ""

# read file and remove trailing newline because we're making a list
# expected line format: STEAM_X:Y:Z (legacy textual SteamID)
account_lines = [line.rstrip("\n") for line in open("accounts.txt").readlines()]

# convert each STEAM_X:Y:Z to a 64-bit community id string
ids = []
for line in account_lines:
    # https://developer.valvesoftware.com/wiki/SteamID
    Z = int(line.split(':')[2])
    V = 0x0110000100000000 # profile ID constant
    Y = int(line.split(':')[1])
    W = Z * 2 + V + Y
    ids.append(str(W))

# API takes in comma seperated steamids
ids_string = ",".join([x for x in ids])
|
||||
|
||||
# https://developer.valvesoftware.com/wiki/Steam_Web_API
# one batched request each for profile summaries and ban states
summaries = json.load(urllib.request.urlopen("http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key=" + steamapikey + "&steamids=" + ids_string))
bans = json.load(urllib.request.urlopen("http://api.steampowered.com/ISteamUser/GetPlayerBans/v1/?key=" + steamapikey + "&steamids=" + ids_string))

# NOTE(review): handle is never explicitly closed; relies on interpreter exit
output_file = open("output.html", "w", encoding="utf-8")
|
||||
|
||||
# static page header: inline styles plus the result table's header row
output_file.write('\
<!DOCTYPE html>\n\
<html>\n\
<head>\n\
<style>\n\
body {\n\
font-family: sans-serif;\n\
}\n\
\n\
table {\n\
color: #222;\n\
border-collapse: collapse;\n\
}\n\
\n\
tr, th, td {\n\
border: 1px solid #a2a9b1;\n\
padding: 0.2em 0.4em;\n\
}\n\
\n\
.pwned {\n\
background-color: #ffb6c1\n\
}\n\
\n\
th {\n\
background-color: #eaecf0;\n\
text-align: center;\n\
}\n\
\n\
a:hover, a:visited, a:link, a:active {\n\
text-decoration: none;\n\
}\n\
</style>\n\
</head>\n\
\n\
<body>\n\
<table>\n\
<tr>\n\
<th>ID</th>\n\
<th>Name</th>\n\
<th>Status</th>\n\
<th>Type</th>\n\
<th>BanDays</th>\n\
<th>LogDays</th>\n\
<th>Profile</th>\n\
</tr>\n\
')
|
||||
|
||||
numbanned = 0

# one table row per requested id
for i in range(len(ids)):
    # find this id's summary; the API may omit players entirely
    # bug fix: if no entry matched, the original silently used the LAST
    # player from the previous iteration of the search loop
    try:
        for summary in summaries['response']['players']:
            if summary['steamid'] == str(ids[i]):
                break
        else:
            continue  # no summary returned for this id
    except (KeyError, TypeError):
        continue

    # same for the ban record
    try:
        for ban in bans['players']:
            if ban['SteamId'] == str(ids[i]):
                break
        else:
            continue
    except (KeyError, TypeError):
        continue

    status = ""
    bantype = ""
    bandays = ""

    if ban['VACBanned']:
        status = "Pwned"
        bantype = "VAC"
        bandays = str(ban['DaysSinceLastBan'])

    # a game ban takes display precedence over a VAC ban
    if ban['NumberOfGameBans'] > 0:
        status = "Pwned"
        bantype = "Gameban"
        bandays = str(ban['DaysSinceLastBan'])

    # bug fix: the original incremented inside both branches, double-counting
    # accounts that hold a VAC ban AND a game ban
    if status == "Pwned":
        numbanned += 1

    name = summary['personaname']
    # escape angle brackets so profile names can't inject html
    # (bug fix: the exported replace("<", "<") was a no-op -- mangled "&lt;")
    name = name.replace("<", "&lt;")
    name = name.replace(">", "&gt;")

    logdays = str(int((time.time() - summary['lastlogoff']) / 86400)) # length of a day in epoch

    # pwned rows get the pink highlight class
    line_start = ' <td>' if status != "Pwned" else ' <td class="pwned">'

    output_file.write(' <tr>\n')
    output_file.write(line_start + '<a target="_blank" href="' + 'https://steamcommunity.com/profiles/' + str(ids[i]) + '">' + str(ids[i]) + '</a></td>\n')
    output_file.write(line_start + name + '</td>\n')
    output_file.write(line_start + status + '</td>\n')
    output_file.write(line_start + bantype + '</td>\n')
    output_file.write(line_start + bandays + '</td>\n')
    output_file.write(line_start + logdays + '</td>\n')
    output_file.write(line_start + '<a target="_blank" href="' + 'https://steamcommunity.com/profiles/' + str(ids[i]) + '"><img src=' + summary['avatarmedium'] + ">"+ '</img></td>\n')
    output_file.write(' </tr>\n')
    # (the original's trailing `i += 1` was a no-op inside a for loop; removed)
|
||||
|
||||
# page footer with the banned / total tally
output_file.write('\
</table>\n\
' + str(numbanned) + '/' + str(len(ids)) + ' banned\n\
</body>\n\
\
</html>\n')
|
||||
58
python/web/sheethost_scraper.py
Executable file
58
python/web/sheethost_scraper.py
Executable file
@@ -0,0 +1,58 @@
|
||||
#!/usr/bin/env python3

# downloads every .pdf score listed on a sheet.host user page
# usage: sheethost_scraper.py [login] [password] [page name]

import requests
import bs4
import sys

if len(sys.argv) < 4:
    print('Usage: ' + sys.argv[0] + ' [login] [password] [page name]')
    exit(1)

login = sys.argv[1]
password = sys.argv[2]
page_name = sys.argv[3]
|
||||
|
||||
def download_sheet(s, url):
    """Fetch one sheet page via session *s* and download every linked .pdf
    into the current directory.

    A failed page fetch is reported and skipped; a failed pdf fetch is
    reported and aborts the remaining links (matching the original flow).
    """
    page = s.get(url)

    try:
        page.raise_for_status()
    except requests.RequestException:
        print("Couldn't get %s" % url)
        return

    soup = bs4.BeautifulSoup(page.text, 'html.parser')

    links = soup.select('a')
    for link in links:
        if '.pdf' in link.text:
            # link text is '/<name>.pdf...'; drop the slash, keep through '.pdf'
            with open(link.text[1:link.text.find('.pdf') + 4], 'wb') as f:
                file = s.get(link.attrs['href'])

                try:
                    # bug fix: the original re-checked `page` (already
                    # verified above) instead of the pdf response itself
                    file.raise_for_status()
                except requests.RequestException:
                    print("Couldn't get %s" % link.text)
                    return

                # stream the pdf to disk in 100 KB chunks
                for chunk in file.iter_content(100000):
                    f.write(chunk)
|
||||
|
||||
with requests.session() as s:
    # NOTE(review): authenticates against hi10anime's wordpress login but then
    # scrapes sheet.host -- session cookies don't carry across domains, and
    # wp-login normally expects 'log'/'pwd' fields; confirm intent
    login = s.post('https://hi10anime.com/wp-login.php', { 'login':login, 'password':password })
    login.raise_for_status()

    if not 'You have successfully logged in. Welcome back!' in login.text:
        print("Couldn't log in")
        exit(1)

    page = s.get('https://sheet.host/user/%s/sheets' % page_name)
    page.raise_for_status()

    soup = bs4.BeautifulSoup(page.text, 'html.parser')

    # each .score-title anchor links to one sheet's own page
    titles = soup.select('.score-title')

    for title in titles:
        print('Getting %s' % title.text)
        download_sheet(s, title.attrs['href'])
|
||||
66
python/web/subscomru_scraper.py
Executable file
66
python/web/subscomru_scraper.py
Executable file
@@ -0,0 +1,66 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import requests
|
||||
import bs4
|
||||
|
||||
def download_file(url):
    """Download *url* into the current directory, named after the last
    path segment.

    On an http error a '<name>.failed' marker file is created and the
    download is skipped.  Bug fixes vs the original: the error body was
    still written to disk after a failure, the marker file handle was
    leaked, and a redundant f.close() followed the with block.
    """
    filename = url[url.rfind('/') + 1:]

    print('Downloading %s' % filename)

    file = requests.get(url)

    try:
        file.raise_for_status()
    except requests.RequestException:
        # leave a marker so failed downloads are visible afterwards
        open(filename + '.failed', 'w').close()
        return

    # stream the payload to disk in 100 KB chunks
    with open(filename, 'wb') as f:
        for chunk in file.iter_content(100000):
            f.write(chunk)
|
||||
|
||||
def get_file_name(url):
    """Scrape a subs.com.ru entry page and return the archive file name.

    Scans the table cells for the first text mentioning a .rar/.zip/.7z
    archive.  Returns None implicitly when nothing matches.
    """
    page = requests.get(url)
    page.raise_for_status()

    soup = bs4.BeautifulSoup(page.text, "html.parser")

    # the file name sits in one of the zebra-striped 'even' table cells
    cells = soup.select('td.even')
    for cell in cells:
        text = cell.getText()

        if '.rar' in text or '.zip' in text or '.7z' in text:
            return text
|
||||
|
||||
|
||||
def scrape_site(url):
    """Walk every listing page starting at *url*, downloading each entry.

    Follows the 'next page' link until none remains.  Raises on http
    errors for listing pages; individual file failures leave .failed
    markers (see download_file).
    """
    # split the url to use later for constructing new urls
    base_url = url[:url.rfind('/') + 1]
    url = url[url.rfind('/') + 1:]

    while True:
        print('Getting %s' % url)

        page = requests.get(base_url + url)
        page.raise_for_status() # throw on fail

        soup = bs4.BeautifulSoup(page.text, "html.parser")

        titles = soup.select('a[title]')
        for title in titles:
            link = title.attrs['href']

            if 'id' in link and not 'dl' in link: # find content links
                print('Found %s' % title.attrs['title'])
                # NOTE(review): get_file_name may return None when no archive
                # cell is found, which would break this concatenation
                download_file(base_url + 'sub/enganime/' + get_file_name(base_url + link))

        next_link = soup.select('span.pagenav_next > a')
        if len(next_link) == 0:
            print('End of site')
            break

        url = next_link[0].attrs['href']

# entry point: start from page 5 of the english anime subs listing
scrape_site('http://subs.com.ru/list.php?c=enganime&p=5&w=asc&d=1')
|
||||
Reference in New Issue
Block a user