Fix line-endings; Increase portability; Add speedtyper.py

olari
2019-05-26 23:05:28 +03:00
parent 661a5984a3
commit 63a1b4f501
33 changed files with 1447 additions and 1341 deletions


@@ -1,33 +1,34 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# bulk file renamer that you interface with using an intermediary text file.
import os

def setup_file():
    # the with-block flushes and closes files.txt before the user starts editing it
    with open('files.txt', 'w') as files:
        for file in os.listdir('.'):
            files.write(file + '\n')

def rename_files():
    # read the edited names back in the same directory order they were written
    with open('files.txt', 'r') as new_names:
        for file in os.listdir('.'):
            new_name = new_names.readline().rstrip('\n')
            if new_name == file or new_name == "files.txt":
                continue
            os.rename(file, new_name)

print("bulk renamer by Olari.\n")
print("Generating files.txt")
print("Modify it to rename files\n")
setup_file()
input("Waiting for input...\n")
print("Renaming files")
rename_files()
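
One failure mode the renamer does not guard against: if the user adds or deletes a line in files.txt, every following file silently pairs with the wrong name. A minimal sketch of a length check that could run before rename_files() (hypothetical helper, not part of the commit):

def check_files(path='files.txt'):
    # abort if the edited list no longer lines up with the directory
    with open(path) as f:
        names = f.read().splitlines()
    if len(names) != len(os.listdir('.')):
        raise SystemExit("files.txt line count doesn't match the directory")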


@@ -1,26 +1,27 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# deadsimple discord bot
# written against the pre-1.0 discord.py API (send_message, credential login)
import discord
import asyncio

client = discord.Client()

async def task():
    await client.wait_until_ready()
    while not client.is_closed:
        channel = client.get_channel('channel id')
        await client.send_message(channel, 'message')
        await asyncio.sleep(60) # time to sleep

@client.event
async def on_ready():
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
    client.loop.create_task(task())

client.run('username', 'password')


@@ -1,257 +1,257 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# overly complicated flac reencoder originally written for the big touhou music torrent on nyaa.si
# contains example of process calling and multithreading
# not recommended to use (probably destructive)
import os, re, time
import subprocess
import multiprocessing  # was missing; Queue/Process/cpu_count are used below
import wave
from queue import Queue
from threading import Thread

f = open("errlog", "w", encoding="UTF-8")

def remove_if_exists(filename):
    if os.path.exists(filename):
        os.remove(filename)

def opus_enc(queue, split_track_filename, track, quality=140.0):
    subprocess.call([
        'opusenc',
        '--vbr',
        '--bitrate', str(quality),
        #'--comp', 10, #default
        #'--framesize', '60', # default 20
        '--artist', track.performer,
        '--comment', 'tracknumber={}'.format(track.index),
        '--title', track.title,
        '--date', track.cd_date,
        '--genre', track.cd_genre,
        '--album', track.cd_title,
        split_track_filename,
        '{}.opus'.format(os.path.splitext(split_track_filename)[0]),
    ])
    queue.get()  # free a slot in the bounded queue once encoding finishes

class Track():
    def __init__(self, track_index, filename, parent):
        for member in ('cd_performer', 'cd_title', 'cd_date', 'cd_genre'):
            setattr(self, member, getattr(parent, member))
        self.filename = filename
        self.filepath = filename[:filename.rfind('\\')+1]
        self.title = ''
        self.index = track_index
        self.performer = self.cd_performer
        self.time = { 1:0.0 }
    def __str__(self):
        return "{} - {} - {}".format(self.index, self.title, self.time)

class CueSheet():
    def __init__(self, filename):
        self.filename = filename
        self.filepath = filename[:filename.rfind('\\')+1]
        self.cd_performer = ''
        self.cd_title = ''
        self.cd_genre = ''
        self.cd_date = ''
        self.current_file = ''
        self.tracks = []
        self.regex_lst = (
            (re.compile(r'PERFORMER\s(.+)'), self.__performer),
            (re.compile(r'REM DATE\s(.+)'), self.__date),
            (re.compile(r'REM GENRE\s(.+)'), self.__genre),
            (re.compile(r'TITLE\s(.+)'), self.__title),
            (re.compile(r'FILE\s(.+)\sWAVE'), self.__file),
            (re.compile(r'TRACK\s(\d{2})\sAUDIO'), self.__track),
            (re.compile(r'INDEX\s(\d{2})\s(\d{1,3}:\d{2}:\d{2})'), self.__index),
        )
    def __str__(self):
        value = "Title: {}\nPerformer: {}\nGenre: {}\nDate: {}\n".format(self.cd_title, self.cd_performer, self.cd_genre, self.cd_date)
        for track in self.tracks:
            value += ' ' + str(track) + '\n'
        return value
    def read(self):
        with open(self.filename, 'r', encoding='utf-8-sig') as f:
            for line in f:
                for regex, handler in self.regex_lst:
                    mobj = regex.match(line.strip())
                    if mobj:
                        handler(*self.unquote(mobj.groups()))
    def split(self):
        # the bounded queue doubles as a semaphore capping concurrent encoders
        encoding_queue = multiprocessing.Queue(multiprocessing.cpu_count())
        cds = set()
        tracks = set()
        for i, track in enumerate(self.tracks):
            # FATAL: sheet is not for .tta file
            if track.filename[-4:] != '.tta':
                f.write("\nFilename isn't .tta ({}):\n{}\n".format(track.filename, str(self)))
                return
            # strip characters that are invalid in Windows filenames
            track_path = track.filepath + ' - '.join((track.index, track.title)).replace('?', '').replace('\\', '').replace(':', '')
            track_opus = track_path + '.opus'
            track_wav = track_path + '.wav'
            if os.path.exists(track_opus):
                f.write("File already exists, continuing... ({})".format(track_opus))
                remove_if_exists(track_wav)
                continue
            cd_wav = track.filename[:-4] + '.wav'
            # decode .tta if needed
            if not os.path.exists(cd_wav):
                # FATAL: no file to decode
                if not os.path.exists(track.filename):
                    f.write("\nFile doesn't exist ({}):\n{}\n".format(track.filename, str(self)))
                    return
                result = subprocess.call([
                    'tta', #'ttaenc',
                    '-d',
                    track.filename,
                    #'-o',
                    cd_wav
                ])
                # FATAL: .tta decode failed
                if result != 0:
                    f.write("Failed to decode .tta ({}):\n{}\n\n".format(track.index, str(self)))
                    return
                # remove .tta
                remove_if_exists(track.filename)
            # split .wav into track
            if not os.path.exists(track_wav):
                wafi = wave.open(cd_wav, 'rb')
                param_names = ('nchannels', 'sampwidth', 'framerate', 'nframes', 'comptype', 'compname')
                params = wafi.getparams()
                param_dict = dict(zip(param_names, params))
                start = int(param_dict['framerate'] * track.time[1])
                stop = param_dict['nframes']
                # was 'sheet.tracks', a NameError inside this method
                if len(self.tracks) > i+1 and self.tracks[i+1].filename == track.filename:
                    stop = int(param_dict['framerate'] * self.tracks[i+1].time.get(0, self.tracks[i+1].time[1]))
                wafi_write = wave.open(track_wav, 'wb')
                newparams = list(params)
                newparams[3] = 0  # nframes; recomputed by writeframes()
                wafi_write.setparams( tuple(newparams) )
                wafi.setpos(start)
                wafi_write.writeframes(wafi.readframes(stop-start))
                wafi_write.close()
                wafi.close()
            encoding_queue.put(track_wav)  # blocks while all cpu slots are busy
            p = multiprocessing.Process(
                target=opus_enc,
                args=(
                    encoding_queue,
                    track_wav,
                    track
                )
            )
            p.start()
            if cd_wav not in cds:
                cds.add(cd_wav)
            tracks.add(track_wav)
        while not encoding_queue.empty():
            time.sleep(0.2)
        for cd in cds:
            remove_if_exists(cd)
        for track in tracks:
            remove_if_exists(track)
        remove_if_exists(self.filename)
        print(self.filename, "done!")
    def __performer(self, s):
        if not self.tracks:
            self.cd_performer = s
        else:
            self.tracks[-1].performer = s
    def __title(self, s):
        if not self.tracks:
            self.cd_title = s
        else:
            self.tracks[-1].title = s
    def __genre(self, s):
        self.cd_genre = s
    def __date(self, s):
        self.cd_date = s
    def __file(self, s):
        self.current_file = s
    def __track(self, s):
        self.tracks.append( Track(s, self.filepath + self.current_file, self) )
    def __index(self, idx, s):
        idx = int(idx)
        self.tracks[-1].time[idx] = self.index_split(s)
    @staticmethod
    def index_split(s):
        # CUE INDEX times are MM:SS:FF with 75 frames per second
        t = s.split(':')
        return float(t[0])*60 + float(t[1]) + float(t[2]) / 75.0
    @staticmethod
    def dqstrip(s):
        if s[0] == '"' and s[-1] == '"': return s[1:-1]
        return s
    @staticmethod
    def unquote(t):
        return tuple([CueSheet.dqstrip(s.strip()) for s in t])

class SplitterWorker(Thread):
    def __init__(self, queue, filename):
        Thread.__init__(self)
        self.queue = queue  # kept for symmetry; the thread never reads it
        self.filename = filename
    def run(self):
        sheet = CueSheet(self.filename)
        sheet.read()
        sheet.split()

if __name__ == '__main__':
    queue = Queue()
    for root, dirs, files in os.walk('.'):
        for name in files:
            if name[-4:].lower() == '.cue':
                worker = SplitterWorker(queue, root + '\\' + name)
                worker.daemon = True
                worker.start()
        # kill switch: create a file named 'stop' to abort the walk
        if os.path.exists('./stop'):
            exit(1)
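
A quick worked example of the INDEX arithmetic in index_split(): CUE timestamps are MM:SS:FF with 75 frames per second, so 03:02:45 lands at

>>> 3*60 + 2 + 45/75.0
182.6

seconds into the disc image.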


@@ -1,16 +1,17 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# extracts the cover art from a flac audio file
from mutagen.flac import FLAC

song = "cover.flac"
var = FLAC(song)
pics = var.pictures
print(pics)
for p in pics:
    if p.type == 3:  # picture type 3 is the front cover in the FLAC spec
        print("\nfound front cover")
        with open("cover.jpg", "wb") as f:
            f.write(p.data)
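
The script always writes cover.jpg even when the embedded picture is a PNG. A small sketch of deriving the extension from the picture's MIME type instead (mutagen's Picture objects carry a mime field such as 'image/jpeg'):

ext = {'image/jpeg': 'jpg', 'image/png': 'png'}.get(p.mime, 'bin')
with open('cover.' + ext, 'wb') as f:
    f.write(p.data)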


@@ -1,45 +1,46 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt

class Ore:
    def __init__(self, name, low, high):
        self.name = name
        self.low = low
        self.high = high

oregen = (
    Ore('gold', 0, 32),
    Ore('iron', 0, 64),
    Ore('coal', 0, 128),
    Ore('lapis', 0, 32),
    Ore('diamond', 0, 16),
    Ore('redstone', 0, 16),
    Ore('emerald', 0, 16),
    Ore('certus', 24, 48),
    Ore('apatite', 54, 96),
    Ore('uranium', 16, 42),
    Ore('ruby', 16, 42),
    Ore('sapphire', 16, 42),
    Ore('bauxite', 48, 72),
    Ore('tungsten', 0, 10),
    Ore('peridot', 16, 42),
    Ore('copper', 40, 75),
    Ore('tin', 20, 55),
    Ore('silver', 5, 30),
    Ore('lead', 5, 30),
    Ore('aluminum', 48, 72),
    Ore('nickel', 5, 20),
    Ore('platinum', 7, 75),
    Ore('iridium', 8, 75),
)
oregen = sorted(oregen, key=lambda x: x.low, reverse=True)
# each (high, low) pair is a two-point dataset, so each box/whisker spans that ore's spawn range
plt.boxplot([(x.high, x.low) for x in oregen], labels=[x.name.title() for x in oregen], vert=False)
plt.title('FTB Continuum Oregen')
plt.xlabel('Y-level')
plt.savefig('oregen.svg', format='svg')


@@ -1,14 +1,15 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# converts code signatures found in ida to ones easily usable in c++ code
import sys

if len(sys.argv) < 2:
    print("Usage: " + sys.argv[0] + " [ida style sig]")
    exit(1)
print(
    '"' + ''.join([('\\x' + (byte if byte != '?' else '00')) for byte in sys.argv[1:]]) + '", "' +
    ''.join([('x' if byte != '?' else '?') for byte in sys.argv[1:]]) + '"'
)
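
An illustrative run (the script name and byte values are made up; quote the ?s on shells that glob them):

$ python sig_convert.py 55 8B EC '?' 8B 45 '?'
"\x55\x8B\xEC\x00\x8B\x45\x00", "xxx?xx?"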

python/speedtyper.py (new file, 97 lines)

@@ -0,0 +1,97 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import curses

class Word():
    def __init__(self, target, x, y):
        self.target = target
        self.input = ""
        self.x = x
        self.y = y

def get_words(text, max_x, max_y):
    curr_x = 0
    curr_y = 0
    words = []
    for word in text.split():
        if curr_x + len(word) > max_x:
            curr_y += 1
            curr_x = 0
            if curr_y > max_y:
                print("text too long, scrolling not implemented.")
                exit(1)
        words.append(Word(word, curr_x, curr_y))
        curr_x += len(word) + 1
    return words

def main_curses(stdscr, text):
    curses.use_default_colors()
    curses.init_pair(1, curses.COLOR_GREEN, -1)
    curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_RED)
    key = old_max_y = old_max_x = curr_word = 0
    while True:
        # rewrap words on screen size change
        max_y, max_x = stdscr.getmaxyx()
        if max_y != old_max_y or max_x != old_max_x:
            words = get_words(text, max_x, max_y)
            old_max_x = max_x
            old_max_y = max_y
        if key in (curses.KEY_BACKSPACE, 'KEY_BACKSPACE', '\b', '\x7f', 127): # fml
            words[curr_word].input = words[curr_word].input[:-1]
        elif key >= 32 and key <= 126:
            words[curr_word].input += chr(key)
            # increment current word if it's completed
            if words[curr_word].input == words[curr_word].target:
                curr_word += 1
                # text complete
                if curr_word == len(words):
                    break
        stdscr.clear()
        for word in words:
            if word.input == word.target:
                stdscr.addstr(word.y, word.x, word.target, curses.A_DIM)
            elif word == words[curr_word]:
                stdscr.addstr(word.y, word.x, word.target, curses.A_UNDERLINE)
            else:
                stdscr.addstr(word.y, word.x, word.target)
        for i, c in enumerate(words[curr_word].input):
            if i >= len(words[curr_word].target) or words[curr_word].input[i] != words[curr_word].target[i]:
                stdscr.addstr(words[curr_word].y, words[curr_word].x + i, c, curses.color_pair(2) | curses.A_BOLD)
            else:
                stdscr.addstr(words[curr_word].y, words[curr_word].x + i, c, curses.color_pair(1))
        stdscr.move(words[curr_word].y, words[curr_word].x + len(words[curr_word].input))
        key = stdscr.getch()

def main(argv):
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument("input")
    args = parser.parse_args(argv[1:])
    text = open(args.input).read()
    curses.wrapper(main_curses, text)

if __name__ == "__main__":
    main(sys.argv)
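
Going by the ArgumentParser above, the new script takes one positional argument, the path of a text file to type through (invocation below is illustrative):

$ python speedtyper.py sample.txt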


@@ -1,45 +1,46 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# converts MyAnimeList's XML exports to readable (but less informative) text files.
import bs4
import glob

animelists = glob.glob('animelist*.xml')
for animelist in animelists:
    with open(animelist, 'r') as xml, open(animelist + '.txt', 'w') as file:
        soup = bs4.BeautifulSoup(xml.read(), 'html.parser')
        completed = []
        ptw = []
        movies = []
        for anime in soup.select('anime'):
            line = (anime.select('series_title')[0].text + ' '
                    + anime.select('my_watched_episodes')[0].text + '/'
                    + anime.select('series_episodes')[0].text + '\n')
            series_type = anime.select('series_type')[0].text
            status = anime.select('my_status')[0].text
            if series_type == 'Movie':
                movies.append(line)
                continue
            if status == 'Completed':
                completed.append(line)
            elif status == 'Plan to Watch':
                ptw.append(line)
        for title in completed:
            file.write(title)
        file.write('\n')
        for title in ptw:
            file.write(title)
        file.write('\n')
        for title in movies:
            file.write(title)


@@ -1,14 +1,15 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# converts youtube subscriptions export .xml to simple text file
with open('subscription_manager', 'r', encoding='utf-8') as f:
    for line in f:  # 'while f:' looped forever once the file was exhausted
        if 'channel_id' not in line:
            continue
        idpos = line.find('channel_id=') + len('channel_id=')
        channel_id = line[idpos:idpos + len('UC3Y4vKAzTCqSdOt0ZeYWvTg')]
        print('https://www.youtube.com/channel/' + channel_id)
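
The slice above assumes every channel ID is exactly as long as the sample 'UC...' string (24 characters), which holds for current YouTube IDs but is fragile. A regex-based sketch under that same assumption ('UC' plus 22 URL-safe characters):

import re
with open('subscription_manager', 'r', encoding='utf-8') as f:
    for channel_id in re.findall(r'channel_id=(UC[\w-]{22})', f.read()):
        print('https://www.youtube.com/channel/' + channel_id)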


@@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os


@@ -1,47 +1,50 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import json
import sys

if len(sys.argv) < 3:
    print("usage: python get_7k_pp.py [api key] [username...]")
    exit(0)
api_key = sys.argv[1]
usernames = sys.argv[2:]

def get_json(url):
    page = requests.get(url)
    try:
        page.raise_for_status()
    except requests.exceptions.RequestException:
        print("Could not get '{}'".format(url))
        return []
    return json.loads(page.text)

def get_user_best(api_key, username):
    return get_json("https://osu.ppy.sh/api/get_user_best?k={}&limit=100&m=3&u={}".format(api_key, username))

def get_beatmap(api_key, id):
    return get_json("https://osu.ppy.sh/api/get_beatmaps?k={}&b={}".format(api_key, id))

for username in usernames:
    scores_7k = []
    pp_7k = 0.0
    for score in get_user_best(api_key, username):
        info = get_beatmap(api_key, score["beatmap_id"])[0]
        if info["diff_size"] == '7':
            # osu! weights the n-th best score (0-indexed) by 0.95**n
            percentage = 100.0 * 0.95 ** len(scores_7k)
            pp_7k += float(score["pp"]) / 100.0 * percentage
            scores_7k.append("[{:.2f}+ ({:.2f}%)] {:.2f} {} [{}] {}k".format(pp_7k, percentage, float(score["pp"]), info["title"], info["version"], score["score"][:3]))
    print("7k pp for '{}'".format(username))
    print("Total = {}".format(pp_7k))
    for score_7k in scores_7k:
        print(score_7k)
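
For reference, the 0.95**n weighting decays each successive score geometrically, so three 100pp scores total 100 + 95 + 90.25:

>>> round(sum(100 * 0.95**n for n in range(3)), 2)
285.25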


@@ -1,34 +1,37 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from selenium import webdriver

def login(driver, username, password):
    driver.get("https://osu.ppy.sh/forum/ucp.php?mode=login")
    driver.find_element_by_name("username").send_keys(username)
    driver.find_element_by_name("password").send_keys(password)
    driver.find_element_by_name("login").click()

def main(argv):
    if len(argv) < 5:
        print("Usage: {} [USERNAME] [PASSWORD] [SEARCH PARAMS] [NUM PAGES]".format(argv[0]))
        return
    username = argv[1]
    password = argv[2]
    search_params = argv[3].strip('"')
    num_pages = int(argv[4])
    with webdriver.Firefox() as driver:
        login(driver, username, password)
        # range(1, ...) never yields 0, so the old 'if page_nr == 0' check was dead code
        for page_nr in range(1, num_pages + 1):
            driver.get("https://old.ppy.sh/p/beatmaplist?{}&page={}".format(search_params, page_nr))
            for beatmap_elem in driver.find_elements_by_class_name("beatmap"):
                print(beatmap_elem.get_property("id"))

if __name__ == "__main__":
    main(sys.argv)


@@ -1,23 +1,24 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests # http requests
import bs4 # html parser

with open("titles.txt", "w", encoding="UTF-8") as file:
    for index in range(1, 175):
        url = "https://www.gogdb.org/products?page=" + str(index)
        print(url)
        page = requests.get(url)  # was rebuilding the same url inline
        page.raise_for_status()
        soup = bs4.BeautifulSoup(page.text, "html.parser")
        producttable = soup.select("#product-table")[0]
        titles = producttable.select("tr")
        for title in titles:
            if len(title.select(".col-type")) == 0:
                continue
            if title.select(".col-type")[0].text == 'Game':
                file.write(title.select(".col-name")[0].text.strip() + '\n')


@@ -1,3 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests, bs4, time
def get_titles(filename, title_type, maxrank):


@@ -1,137 +1,138 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# reads SteamIDs from ./accounts.txt and outputs ban information into ./output.html
import urllib.request
import json
import time

steamapikey = ""
# read file and remove trailing newline because we're making a list
account_lines = [line.rstrip("\n") for line in open("accounts.txt").readlines()]
ids = []
for line in account_lines:
    # https://developer.valvesoftware.com/wiki/SteamID
    Z = int(line.split(':')[2])
    V = 0x0110000100000000 # profile ID constant
    Y = int(line.split(':')[1])
    W = Z * 2 + V + Y
    ids.append(str(W))
# API takes in comma separated steamids
ids_string = ",".join(ids)
# https://developer.valvesoftware.com/wiki/Steam_Web_API
summaries = json.load(urllib.request.urlopen("http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key=" + steamapikey + "&steamids=" + ids_string))
bans = json.load(urllib.request.urlopen("http://api.steampowered.com/ISteamUser/GetPlayerBans/v1/?key=" + steamapikey + "&steamids=" + ids_string))
output_file = open("output.html", "w", encoding="utf-8")
output_file.write('\
<!DOCTYPE html>\n\
<html>\n\
<head>\n\
<style>\n\
body {\n\
font-family: sans-serif;\n\
}\n\
\n\
table {\n\
color: #222;\n\
border-collapse: collapse;\n\
}\n\
\n\
tr, th, td {\n\
border: 1px solid #a2a9b1;\n\
padding: 0.2em 0.4em;\n\
}\n\
\n\
.pwned {\n\
background-color: #ffb6c1\n\
}\n\
\n\
th {\n\
background-color: #eaecf0;\n\
text-align: center;\n\
}\n\
\n\
a:hover, a:visited, a:link, a:active {\n\
text-decoration: none;\n\
}\n\
</style>\n\
</head>\n\
\n\
<body>\n\
<table>\n\
<tr>\n\
<th>ID</th>\n\
<th>Name</th>\n\
<th>Status</th>\n\
<th>Type</th>\n\
<th>BanDays</th>\n\
<th>LogDays</th>\n\
<th>Profile</th>\n\
</tr>\n\
')
numbanned = 0
for i in range(len(ids)):
    # match this id against both API responses and skip it if either entry is
    # missing (the old try/except never skipped anything: the loops can't raise)
    for summary in summaries['response']['players']:
        if summary['steamid'] == str(ids[i]):
            break
    else:
        continue
    for ban in bans['players']:
        if ban['SteamId'] == str(ids[i]):
            break
    else:
        continue
    status = ""
    bantype = ""
    bandays = ""
    if ban['VACBanned']:
        status = "Pwned"
        bantype = "VAC"
        bandays = str(ban['DaysSinceLastBan'])
    if ban['NumberOfGameBans'] > 0:
        status = "Pwned"
        bantype = "Gameban"
        bandays = str(ban['DaysSinceLastBan'])
    if status == "Pwned":
        numbanned += 1  # count the account once even if it has both ban types
    name = summary['personaname']
    name = name.replace("<", "&lt;") # escape html tag names
    name = name.replace(">", "&gt;")
    logdays = str(int((time.time() - summary['lastlogoff']) / 86400)) # length of a day in epoch
    line_start = ' <td>' if status != "Pwned" else ' <td class="pwned">'
    output_file.write(' <tr>\n')
    output_file.write(line_start + '<a target="_blank" href="' + 'https://steamcommunity.com/profiles/' + str(ids[i]) + '">' + str(ids[i]) + '</a></td>\n')
    output_file.write(line_start + name + '</td>\n')
    output_file.write(line_start + status + '</td>\n')
    output_file.write(line_start + bantype + '</td>\n')
    output_file.write(line_start + bandays + '</td>\n')
    output_file.write(line_start + logdays + '</td>\n')
    output_file.write(line_start + '<a target="_blank" href="https://steamcommunity.com/profiles/' + str(ids[i]) + '"><img src="' + summary['avatarmedium'] + '"></a></td>\n')
    output_file.write(' </tr>\n')
output_file.write('\
</table>\n\
' + str(numbanned) + '/' + str(len(ids)) + ' banned\n\
</body>\n\
</html>\n')
output_file.close()
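
A quick check of the SteamID arithmetic: for STEAM_0:1:12345, Z = 12345 and Y = 1, so the 64-bit profile ID works out to

>>> 12345 * 2 + 0x0110000100000000 + 1
76561197960290419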


@@ -1,58 +1,59 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import bs4
import sys

if len(sys.argv) < 4:
    print('Usage: ' + sys.argv[0] + ' [login] [password] [page name]')
    exit(1)
login = sys.argv[1]
password = sys.argv[2]
page_name = sys.argv[3]

def download_sheet(s, url):
    page = s.get(url)
    try:
        page.raise_for_status()
    except requests.exceptions.RequestException:
        print("Couldn't get %s" % url)
        return
    soup = bs4.BeautifulSoup(page.text, 'html.parser')
    links = soup.select('a')
    for link in links:
        if '.pdf' in link.text:
            with open(link.text[1:link.text.find('.pdf') + 4], 'wb') as f:
                file = s.get(link.attrs['href'])
                try:
                    file.raise_for_status()  # was re-checking 'page' by mistake
                except requests.exceptions.RequestException:
                    print("Couldn't get %s" % link.text)
                    return
                for chunk in file.iter_content(100000):
                    f.write(chunk)

with requests.session() as s:
    resp = s.post('https://hi10anime.com/wp-login.php', { 'login':login, 'password':password })
    resp.raise_for_status()
    if 'You have successfully logged in. Welcome back!' not in resp.text:
        print("Couldn't log in")
        exit(1)
    page = s.get('https://sheet.host/user/%s/sheets' % page_name)
    page.raise_for_status()
    soup = bs4.BeautifulSoup(page.text, 'html.parser')
    titles = soup.select('.score-title')
    for title in titles:
        print('Getting %s' % title.text)
        download_sheet(s, title.attrs['href'])
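
Since the PDFs are already written out in chunks, passing stream=True to the download request would also keep requests from buffering each whole response body in memory first; a one-line tweak, not part of the commit:

file = s.get(link.attrs['href'], stream=True)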


@@ -1,66 +1,67 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import bs4

def download_file(url):
    filename = url[url.rfind('/') + 1:]
    print('Downloading %s' % filename)
    file = requests.get(url)
    try:
        file.raise_for_status()
    except requests.exceptions.RequestException:
        open(filename + '.failed', 'w').close()  # leave a marker for failed downloads
        return  # don't write the error page as if it were the archive
    with open(filename, 'wb') as f:
        for chunk in file.iter_content(100000):
            f.write(chunk)

def get_file_name(url):
    page = requests.get(url)
    page.raise_for_status()
    soup = bs4.BeautifulSoup(page.text, "html.parser")
    cells = soup.select('td.even')  # the download table offers no better selector
    for cell in cells:
        text = cell.getText()
        if '.rar' in text or '.zip' in text or '.7z' in text:
            return text

def scrape_site(url):
    # split the url to use later for constructing new urls
    base_url = url[:url.rfind('/') + 1]
    url = url[url.rfind('/') + 1:]
    while True:
        print('Getting %s' % url)
        page = requests.get(base_url + url)
        page.raise_for_status() # throw on fail
        soup = bs4.BeautifulSoup(page.text, "html.parser")
        titles = soup.select('a[title]')
        for title in titles:
            link = title.attrs['href']
            if 'id' in link and 'dl' not in link: # find content links
                print('Found %s' % title.attrs['title'])
                download_file(base_url + 'sub/enganime/' + get_file_name(base_url + link))
        next_link = soup.select('span.pagenav_next > a')
        if len(next_link) == 0:
            print('End of site')
            break
        url = next_link[0].attrs['href']

scrape_site('http://subs.com.ru/list.php?c=enganime&p=5&w=asc&d=1')