finish rewrite

olari
2021-06-26 16:06:01 +03:00
parent ad9ead66d0
commit 7e72871904
14 changed files with 897 additions and 1031 deletions

.gitignore

@@ -1,5 +0,0 @@
**/*.csv
.ipynb_checkpoints/
__pycache__/
diet
journal.json


@@ -1,111 +0,0 @@
from collections import Counter
from common import parse_foods_file, evaluate_food_entry
import string
import sys
import json
journal = json.load(open('journal.json'))
foods, recipes = parse_foods_file()
if len(sys.argv) > 1:
value, name = sys.argv[1:]
value = float(value.removesuffix('g'))
from pprint import pprint
pprint(evaluate_food_entry(foods, recipes, value, name))
exit(0)
total_entries = 0
total_words = 0
word_frequency = Counter()
total_csv = [['day', 'entries', 'words']]
daily_csv = [['day', 'entries', 'words', 'calories', 'carbs_pct', 'fat_pct',
'protein_pct', 'protein', 'sugar']]
entry_csv = [['timestamp', 'words']]
words_csv = [['word', 'count']]
diet_csv = [[
'timestamp', 'name', 'grams', 'calories', 'carbs', 'fat', 'protein',
'saturated_fat', 'sugar', 'fiber'
]]
for day, obj in journal.items():
daily_entries = len(obj['entries'])
daily_words = 0
daily_calories = 0.0
daily_protein = 0.0
daily_carbs = 0.0
daily_fat = 0.0
daily_sugar = 0.0
for entry in obj['entries']:
for block in entry['blocks']:
if isinstance(block, str):
words = ''.join(
c if c in string.ascii_letters+"'" else ' '
for c in block.lower()
).split()
word_frequency.update(words)
entry_words = len(words)
daily_words += entry_words
elif block['type'] == 'diet':
name = block['food']
value = block['amount']
food = evaluate_food_entry(foods, recipes, value, name)
diet_csv.append((
entry['timestamp'],
name,
value,
round(food.get('Energy', 0.0), 2),
round(food.get('Carbs', 0.0), 2),
round(food.get('Fat', 0.0), 2),
round(food.get('Protein', 0.0), 2),
round(food.get('SaturatedFat', 0.0), 2),
round(food.get('Sugar', 0.0), 2),
round(food.get('Fiber', 0.0), 2),
))
daily_calories += food.get('Energy', 0.0)
daily_protein += food.get('Protein', 0.0)
daily_fat += food.get('Fat', 0.0)
daily_carbs += food.get('Carbs', 0.0)
daily_sugar += food.get('Sugar', 0.0)
entry_csv.append([entry['timestamp'], entry_words])
daily_macros = daily_protein + daily_fat + daily_carbs
daily_csv.append([
day,
daily_entries,
daily_words,
round(daily_calories, 2),
round(100 * (daily_carbs / daily_macros) if daily_carbs else 0, 2),
round(100 * (daily_fat / daily_macros) if daily_fat else 0, 2),
round(100 * (daily_protein / daily_macros) if daily_protein else 0, 2),
round(daily_protein, 2),
round(daily_sugar, 2)
])
total_entries += daily_entries
total_words += daily_words
total_csv.append([day, total_entries, total_words])
words_csv += word_frequency.most_common()
def write_csv(fname, csv):
with open(fname, 'w') as fp:
fp.write('\n'.join(','.join(str(x) for x in row) for row in csv))
write_csv('data/total.csv', total_csv)
write_csv('data/daily.csv', daily_csv)
write_csv('data/entry.csv', entry_csv)
write_csv('data/words.csv', words_csv)
write_csv('data/diet.csv', diet_csv)

backup-script.sh

@@ -1,5 +0,0 @@
#!/bin/bash
apack $1 *.md
rclone copy $1 gdrive:/journal-backup/
rclone copy $1 prodesk:/home/olari/journal-backup/
rm $1

common.py

@@ -1,107 +0,0 @@
from pathlib import Path
from datetime import datetime
def parse_timestamp(timestamp):
return datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')
def format_timestamp(timestamp):
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def parse_foods_file():
path = Path.home() / 'workspace' / 'journal' / 'foods'
text = path.read_text()
foods_str, recipes_str = text.split('---')
def parse_macro(macro):
if macro == '...':
return ('INVALID', 0.0)
name, value = macro.split()
value = float(value.removesuffix('g').removesuffix('kcal'))
return (name, value)
foods = {
macros[0]: dict(parse_macro(macro) for macro in macros[1:])
for macros in [food.split('\n') for food in foods_str.strip().split('\n\n')]
}
def combine_values(fst, snd):
result = fst.copy()
for k,v in snd.items():
if k in fst:
result[k] += v
else:
result[k] = v
return result
recipes = {}
def evaluate_ingredients(ingredients):
result = {}
total_weight = 0.0
for ingredient in ingredients:
k,v = parse_macro(ingredient)
if k == 'TOTAL':
result[k] = v
continue
else:
total_weight += v
food = foods.get(k)
total = 100.0
if not food:
food = recipes[k].copy()
total = food['TOTAL']
del food['TOTAL']
for kk,vv in food.items():
if kk not in result:
result[kk] = 0.0
result[kk] += vv * (v/total)
if 'TOTAL' not in result:
result['TOTAL'] = total_weight
return result
for ingredients in [recipe.split('\n') for recipe in recipes_str.strip().split('\n\n')]:
recipes[ingredients[0]] = evaluate_ingredients(ingredients[1:])
def get_calories_from_macros(mm):
calories = 0.0
for k,v in mm.items():
calories += v * {
'Carbs': 4,
'Fat': 9,
'Protein': 4
}.get(k, 0.0)
return calories
#for k,v in foods.items():
# print(round(v.get('Energy') - get_calories_from_macros(v)), k)
return foods, recipes
def evaluate_food_entry(foods, recipes, value, name):
if name in recipes:
food = recipes[name]
if value == 0.0:
value = food['TOTAL']
food = {k: v*(value/food['TOTAL']) for k,v in food.items()}
elif name in foods:
if value == 0.0:
value = 100
food = {k: v*(value/100.0) for k,v in foods[name].items()}
else:
breakpoint()
print(f'ERROR: Invalid diet entry: {name}')
assert False
return food


@@ -1,28 +0,0 @@
from subprocess import run
import sys
import json
from common import format_timestamp
journal = json.load(open('journal.json'))
matches = []
keywords = sys.argv[1:]
for day, obj in journal.items():
for entry in obj['entries']:
for block in entry['blocks']:
if isinstance(block, str):
words = block.lower().split()
if any(kw in words for kw in keywords):
matches.append((
format_timestamp(entry['timestamp']),
'\n\n'.join([b for b in entry['blocks'] if isinstance(b, str)])
))
buf = ''
for (ts, c) in matches:
buf += f'{ts}\n\n{c}\n\n'
run(['nvim', '-'], input=buf.encode('utf-8'))


@@ -1,24 +0,0 @@
from subprocess import run
import sys
outline = run(
['mutool', 'show', sys.argv[1], 'outline'],
capture_output=True
).stdout.decode('utf-8')
indent = 0
last_quote_index = 0
for line in outline.splitlines():
quote_index = line.find('"')
hash_index = line.find('#')
if quote_index > last_quote_index:
indent += 1
elif quote_index < last_quote_index:
indent -= 1
last_quote_index = quote_index
title = line[quote_index+1:line.find('"', quote_index+1)].strip()
page = int(line[hash_index+1:line.find(',', hash_index+1)])
print(f'{"#"*indent} {title} ({page})')

generate.py

@@ -1,151 +0,0 @@
from common import format_timestamp
import json
import textwrap
def wrap_text(text):
return textwrap.fill(text, 80,
replace_whitespace=False,
break_on_hyphens=False,
break_long_words=False)
def generate_godword(value):
return f"{' '.join(value[:10])}\n{' '.join(value[10:])}"
def generate_habits(value):
return '\n'.join(f'[{"x" if v else "-"}] {k}' for k,v in value.items())
def generate_notifications(value):
return '\n'.join(f'[[{n["source"]}]] {n["message"]}' for n in value)
def generate_tasks(value):
return '\n'.join(f'[{"x" if v else "-"}] {k}' for k,v in value.items())
header_modules = {
'godword': generate_godword,
'habits': generate_habits,
'notifications': generate_notifications,
'tasks': generate_tasks,
}
def generate_diet(block):
_, amount, food = block.values()
return f'@diet {amount}g {food}'
def generate_exercise(block):
if block['kind'] == 'walk':
return f'@exercise walk {block["minutes"]}min {block["distance"]}km {block["steps"]}steps'
elif block['kind'] == 'calisthenics':
return f'@exercise calisthenics {block["sets"]}x{block["reps"]} {block["exercise"]}'
assert False
def generate_default(block):
return f'@{block["type"]} {block["value"]}'
def generate_post(block):
result = '@post'
if ts := block.get('timestamp'):
result += f' {format_timestamp(ts)}'
if content := block.get('content'):
result += f' {content}'
return wrap_text(result)
def generate_timer(block):
parts = [f'@{block["type"]}']
if name := block.get('name'):
parts.append(name)
if ts := block.get('timestamp'):
parts.append(format_timestamp(ts))
return ' '.join(parts)
def generate_info(block):
return wrap_text(f'@info {block["value"]}')
def generate_notes(block):
parts = ['@notes']
if source := block.get('source'):
parts.append(source)
if title := block.get('title'):
parts.append(title)
return '\n'.join(parts)
def generate_notify(block):
return f'@notify {block["day"]} {block["message"]}'
entry_modules = {
'diet': generate_diet,
'exercise': generate_exercise,
'hide': lambda _: '@hide',
'post': generate_post,
'info': generate_info,
'notes': generate_notes,
'behavior': generate_default,
'task': generate_default,
'start': generate_timer,
'stop': generate_timer,
'done': generate_timer,
'notify': generate_notify,
}
def generate_page(day, header, entries):
result = f'# {day}'
for name, value in header.items():
result += f'\n\n{name.title()}:\n'
result += header_modules[name](value)
def format_block(block, is_first):
def format_text(text):
if all(c == '\n' for c in block):
return text
DUMMY_TS = '2020-02-02 02:02:02 '
if is_first:
text = DUMMY_TS + text
length = len(text)
if length > 80:
text = wrap_text(text)
if is_first:
text = text.removeprefix(DUMMY_TS)
return text
def format_module(module):
return entry_modules[module['type']](module)
formatted = format_text(block) if isinstance(block, str) else format_module(block)
if result[-1] != '\n' and not all(c == '\n' for c in formatted):
formatted = ' ' + formatted
return formatted
for entry in entries:
result += f'\n\n{format_timestamp(entry["timestamp"])}'
for i, block in enumerate(entry['blocks']):
result += format_block(block, i == 0)
result += '\n'
return result
if __name__ == '__main__':
journal = json.load(open('journal.json'))
for curr_day in journal:
header, entries = journal[curr_day].values()
page = generate_page(curr_day, header, entries)
print(page)

journal.py

@@ -0,0 +1,897 @@
from copy import deepcopy
from datetime import datetime, timedelta
from functools import reduce, partial
from pathlib import Path
from shutil import copyfile, rmtree
from subprocess import run
from tempfile import mktemp, mkdtemp
from zipfile import ZipFile
import json
import random
import re
import sys
import textwrap
### GLOBALS
JOURNAL_PATH = Path.home() / '.journal.json'
### UTILS
def nth_or_default(n, l, default):
return l[n] if n < len(l) else default
def apply(f, x):
return f(x)
def flip(f):
return lambda a1, a2: f(a2, a1)
def identity(x):
return x
def lazy_get(obj, key, default):
result = obj.get(key)
return result if result is not None else default()
def wrap_text(text, columns=80):
return textwrap.fill(text, columns,
replace_whitespace=False,
break_on_hyphens=False,
break_long_words=False)
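# Split a string on the given delimiter characters while keeping the delimiters
# as separate items, e.g. split_keep(('@', '\n'), 'hi @x') -> ['hi ', '@', 'x'].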
def split_keep(delims, string):
res = []
buf = []
def flush_buf():
nonlocal res, buf
if buf:
res.append(''.join(buf))
buf = []
for c in string:
if c in delims:
flush_buf()
res.append(c)
else:
buf.append(c)
flush_buf()
return res
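# Merge each item into the one before it whenever pred(previous item, item)
# holds; used to glue the pieces produced by split_keep back together.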
def merge_if(pred, l):
res = []
for i, curr in enumerate(l):
prev = l[i-1] if i-1 >= 0 else None
if prev and pred(prev, curr):
res[-1] += curr
else:
res.append(curr)
return res
def editor(fpath):
run(['nvim', '+', str(fpath)])
def edit(text, suffix=''):
fpath = Path(mktemp(suffix=suffix))
fpath.write_text(text)
editor(fpath)
text = fpath.read_text()
fpath.unlink()
return text
def prompt(text):
return input(text + ' [y/n] ') == 'y'
### DATE UTILS
def parse_date(date):
return datetime.strptime(date, '%Y-%m-%d')
def format_date(date):
return date.strftime('%Y-%m-%d')
def parse_timestamp(timestamp):
return datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')
def format_timestamp(timestamp):
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def evaluate_time_expression(expression):
if expression == 'today':
return datetime.now()
elif expression == 'yesterday':
return datetime.now() - timedelta(days=1)
### FILE PARSERS
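# The foods file has two sections separated by '---': per-100g macro listings
# for individual foods, followed by recipes listing ingredient weights. A recipe
# may declare an explicit TOTAL weight; otherwise it is the sum of its ingredients.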
def parse_foods_file(text):
foods_str, recipes_str = text.split('---')
def parse_macro(macro):
name, value = macro.split()
return (name, float(value.removesuffix('g').removesuffix('kcal')))
foods = {
macros[0]: dict(parse_macro(macro) for macro in macros[1:])
for macros in [food.split('\n') for food in foods_str.strip().split('\n\n')]
}
recipes = {}
def evaluate_ingredients(ingredients):
result = {}
total_weight = 0.0
for ingredient in ingredients:
k,v = parse_macro(ingredient)
if k == 'TOTAL':
result[k] = v
continue
else:
total_weight += v
food = foods.get(k)
total = 100.0
if not food:
food = recipes[k].copy()
total = food['TOTAL']
del food['TOTAL']
for kk,vv in food.items():
if kk not in result:
result[kk] = 0.0
result[kk] += vv * (v/total)
if 'TOTAL' not in result:
result['TOTAL'] = total_weight
return result
for ingredients in [recipe.split('\n') for recipe in recipes_str.strip().split('\n\n')]:
recipes[ingredients[0]] = evaluate_ingredients(ingredients[1:])
return foods, recipes
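# Scale a food (defined per 100 g) or a recipe (defined per its TOTAL weight) to
# the requested amount in grams; an amount of 0 falls back to the recipe's full
# weight, or to 100 g for plain foods.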
def evaluate_food_entry(foods, recipes, value, name):
if name in recipes:
food = recipes[name]
if value == 0.0:
value = food['TOTAL']
food = {k: v*(value/food['TOTAL']) for k,v in food.items()}
elif name in foods:
if value == 0.0:
value = 100
food = {k: v*(value/100.0) for k,v in foods[name].items()}
else:
breakpoint()
print(f'ERROR: Invalid diet entry: {name}')
assert False
return food
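# Each line of the tasks file is '<days>:<name>', where <days> is a
# comma-separated list of weekday abbreviations (mo,tu,we,th,fr,sa,su).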
def parse_tasks_file(text):
result = []
for task in text.splitlines():
days, name = task.split(':')
days = days.split(',')
result.append((days, name))
return result
### HEADER MODULES
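# Header modules come in create/parse/generate triples: create_* builds the
# initial value for a new day, parse_* reads it back from the rendered header,
# and generate_* turns the stored value into text again.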
def create_godword(journal, date):
words = journal['files']['godword'].strip().split('\n')
return [random.choice(words) for _ in range(20)]
def parse_godword(block):
return block.split()
def generate_godword(value):
return f"{' '.join(value[:10])}\n{' '.join(value[10:])}"
def create_habits(journal, date):
return {x: False for x in journal['files']['habits'].strip().split('\n')}
def parse_habits(block):
result = {}
for habit in block.splitlines():
value, name = habit.split(maxsplit=1)
result[name.strip()] = value[1] == 'x'
return result
def generate_habits(value):
return '\n'.join(f'[{"x" if v else "-"}] {k}' for k,v in value.items())
def create_notifications(journal, date):
notifications = []
for day in journal['days'].values():
for entry in day.get('entries'):
for block in entry['blocks']:
if not isinstance(block, str) and block['type'] == 'notify':
if block['day'] == date:
notifications.append({
'source': entry['timestamp'],
'message': block['message']
})
return notifications
def parse_notifications(notifications):
result = []
for notification in notifications.splitlines():
parts = notification.split()
result.append({
'source': int(parse_timestamp(' '.join(parts[0:2]).strip('[]')).timestamp()),
'message': ' '.join(parts[2:]),
})
return result
def generate_notifications(value):
return '\n'.join(f'[[{format_timestamp(n["source"])}]] {n["message"]}' for n in value)
def create_tasks(journal, date):
tasks = parse_tasks_file(journal['files']['tasks'])
curr_day = {
0: 'mo', 1: 'tu', 2: 'we', 3: 'th',
4: 'fr', 5: 'sa', 6: 'su',
}[parse_date(date).weekday()]
return {name: False for days, name in tasks if curr_day in days}
def parse_tasks(tasks):
result = {}
for task in tasks.splitlines():
value, name = task.split(maxsplit=1)
name = name.strip()
result[name] = value[1] == 'x'
return result
def generate_tasks(value):
return '\n'.join(f'[{"x" if v else "-"}] {k}' for k,v in value.items())
def create_sticky(journal, date):
yesterday = format_date(parse_date(date) - timedelta(days=1))
if day := journal['days'].get(yesterday):
if sticky := day['header'].get('sticky'):
return sticky
### ENTRY MODULES
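# Entry blocks are either free text or '@tag ...' modules; every module type has
# a parse_*/generate_* pair so a page survives a parse/generate round trip
# (which is exactly what handle_test checks).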
def parse_post(block):
block = block.removeprefix('@post').strip()
try:
timestamp = int(parse_timestamp(block[:19]).timestamp())
block = block[19:]
except ValueError:
timestamp = None
content = block.strip()
result = {}
if content:
result['content'] = content
if timestamp:
result['timestamp'] = timestamp
return result
def generate_post(block):
result = '@post'
if ts := block.get('timestamp'):
result += f' {format_timestamp(ts)}'
if content := block.get('content'):
result += f' {content}'
return wrap_text(result)
def parse_notes(block):
tag, source, title = block.splitlines()
return {'source': source, 'title': title}
def generate_notes(block):
parts = ['@notes']
if source := block.get('source'):
parts.append(source)
if title := block.get('title'):
parts.append(title)
return '\n'.join(parts)
def parse_diet(block):
tag, amount, food = block.split()
amount = int(amount.removesuffix('g'))
return {'amount': amount, 'food': food}
def generate_diet(block):
_, amount, food = block.values()
return f'@diet {amount}g {food}'
def parse_timer(block):
tag, *rest = block.split()
name = None
timestamp = None
if len(rest) > 2:
name, *rest = rest
if len(rest) > 1:
timestamp = int(parse_timestamp(' '.join(rest)).timestamp())
result = {}
if name:
result['name'] = name
if timestamp:
result['timestamp'] = timestamp
return result
def generate_timer(block):
parts = [f'@{block["type"]}']
if name := block.get('name'):
parts.append(name)
if ts := block.get('timestamp'):
parts.append(format_timestamp(ts))
return ' '.join(parts)
def parse_exercise(block):
tag, *parts = block.split()
if parts[0] == 'walk':
kind, minutes, distance, steps = parts
return {
'kind': kind,
'minutes': int(minutes.removesuffix('min')),
'distance': float(distance.removesuffix('km')),
'steps': int(steps.removesuffix('steps')),
}
elif parts[0] == 'calisthenics':
kind, split, exercise = parts
sets, reps = split.split('x')
return {
'kind': kind,
'reps': reps,
'sets': sets,
'exercise': exercise,
}
assert False
def generate_exercise(block):
if block['kind'] == 'walk':
return f'@exercise walk {block["minutes"]}min {block["distance"]}km {block["steps"]}steps'
elif block['kind'] == 'calisthenics':
return f'@exercise calisthenics {block["sets"]}x{block["reps"]} {block["exercise"]}'
assert False
def parse_notify(block):
tag, day, *rest = block.split()
return {'day': day.strip(), 'message': ' '.join(rest)}
def generate_notify(block):
return f'@notify {block["day"]} {block["message"]}'
def generate_default(block):
return f'@{block["type"]} {block["value"]}'
def generate_info(block):
return wrap_text(f'@info {block["value"]}')
### PAGE FUNCTIONS
def create_header(journal, date):
return {
'godword': create_godword(journal, date),
'habits': create_habits(journal, date),
'notifications': create_notifications(journal, date),
'tasks': create_tasks(journal, date),
'sticky': create_sticky(journal, date),
}
def parse_header(header):
header_modules = {
'godword': parse_godword,
'habits': parse_habits,
'notifications': parse_notifications,
'tasks': parse_tasks,
'sticky': identity,
}
def split_into_blocks(text):
return [b.strip() for b in re.split(r'\n{2,}', text) if b.strip() != '']
modules = split_into_blocks(header)
result = {}
for module in modules:
name, value = module.split('\n', maxsplit=1)
name = name.lower().removesuffix(':')
result[name] = header_modules[name](value)
return result
def generate_header(header):
header_modules = {
'godword': generate_godword,
'habits': generate_habits,
'notifications': generate_notifications,
'tasks': generate_tasks,
'sticky': identity,
}
result = ''
for name, value in header.items():
if not value:
continue
result += f'\n\n{name.title()}:\n'
result += header_modules[name](value)
return result
def create_entry(journal, date):
return {
'timestamp': int(datetime.now().timestamp()),
'blocks': []
}
def parse_entry(entry):
def create_entry_module_parser(name, handler=None):
handler = handler or (lambda b: {'value': b.removeprefix(f'@{name} ')})
return (name, lambda b: {'type': name} | handler(b))
entry_modules = dict([
create_entry_module_parser('hide', lambda _: {}),
create_entry_module_parser('post', parse_post),
create_entry_module_parser('info'),
create_entry_module_parser('notes', parse_notes),
create_entry_module_parser('behavior'),
create_entry_module_parser('diet', parse_diet),
create_entry_module_parser('task'),
create_entry_module_parser('start', parse_timer),
create_entry_module_parser('stop', parse_timer),
create_entry_module_parser('done', parse_timer),
create_entry_module_parser('exercise', parse_exercise),
create_entry_module_parser('notify', parse_notify),
])
def merge_notes_block(l):
res = []
i = 0
while i < len(l):
if l[i] == '@notes':
# notes nl source nl title
res.append('\n'.join([l[i], l[i+2], l[i+4]]))
i += 5
else:
res.append(l[i])
i += 1
return res
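# Undo hard wrapping: a text block followed by a single newline and more text is
# re-joined when the first line plus the next word would have crossed the
# 80-column limit, i.e. when the break was produced by wrap_text().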
def merge_wrapped_lines(l):
TIMESTAMP_LENGTH = len('2020-02-02 02:02:02 ')
COLUMN_LIMIT = 80
res = []
i = 0
while i < len(l):
curr = l[i]
prev = l[i-1] if i > 0 else None
next = l[i+1] if i+1 < len(l) else None
# pattern: text block, single newline, text block
if prev and curr == '\n' and next:
len_prev = len(prev)
# first block is preceded by timestamp
if i - 1 == 0:
len_prev += TIMESTAMP_LENGTH
# do not merge indented lines
if not next[0].isspace():
next_word = next.split()[0]
# merge only if text is actually wrapped
if len_prev + len(next_word) >= COLUMN_LIMIT:
res[-1] += ' ' + next
i += 2
continue
res.append(curr)
i += 1
return res
def split_into_blocks(text):
return reduce(flip(apply), [
# split the text into sections by newline and tag symbol, keeping the separators
partial(split_keep, ('\n', '@')),
# merge sequential newlines together into a single whitespace block
partial(merge_if, lambda p, c: p == c == '\n'),
# attach escaped tags
partial(merge_if, lambda p, c: c == '@' and p[-1] == '\\'),
# attach tag
partial(merge_if, lambda p, c: p == '@'),
# attach tags which do not come after newline or another tag
partial(merge_if, lambda p, c: c[0] == '@' and p[-1] != '\n' and not (p[0] == '@' and p[-1] == ' ')),
# merge notes block
merge_notes_block,
# strip all non-whitespace blocks
partial(map, lambda s: s if s.isspace() else s.rstrip()), list,
# merge escaped tags with following text
partial(merge_if, lambda p, c: p.endswith('\\@')),
# merge wrapped lines
merge_wrapped_lines,
# remove trailing whitespace block
lambda b: b if b and not all(c == '\n' for c in b[-1]) else b[:-1],
], text)
def parse_module_block(block):
tag = block.split()[0][1:]
return entry_modules[tag](block)
def parse_block(block):
if block.startswith('@'):
return parse_module_block(block)
else:
return block
timestamp, content = entry
return {
'timestamp': int(parse_timestamp(timestamp.strip()).timestamp()),
'blocks': [parse_block(b) for b in split_into_blocks(content)],
}
def generate_entry(entry):
entry_modules = {
'diet': generate_diet,
'exercise': generate_exercise,
'hide': lambda _: '@hide',
'post': generate_post,
'info': generate_info,
'notes': generate_notes,
'behavior': generate_default,
'task': generate_default,
'start': generate_timer,
'stop': generate_timer,
'done': generate_timer,
'notify': generate_notify,
}
def format_block(block, is_first):
def format_text(text):
if all(c == '\n' for c in block):
return text
DUMMY_TS = '2020-02-02 02:02:02 '
if is_first:
text = DUMMY_TS + text
length = len(text)
if length > 80:
text = wrap_text(text)
if is_first:
text = text.removeprefix(DUMMY_TS)
return text
def format_module(module):
return entry_modules[module['type']](module)
formatted = format_text(block) if isinstance(block, str) else format_module(block)
if result[-1] != '\n' and not all(c == '\n' for c in formatted):
formatted = ' ' + formatted
return formatted
result = f'\n\n{format_timestamp(entry["timestamp"])}'
for i, block in enumerate(entry['blocks']):
result += format_block(block, i == 0)
return result
def create_day(journal, date):
return {
'title': date,
'header': create_header(journal, date),
'entries': []
}
def parse_day(text):
ENTRY_RE = re.compile(r'^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) ?', re.MULTILINE)
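# ENTRY_RE contains a capturing group, so re.split returns
# [header, timestamp1, body1, timestamp2, body2, ...]; zipping the odd and even
# slices pairs each timestamp with its entry body.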
header, *tmp = ENTRY_RE.split(text)
entries = list(zip(tmp[::2], tmp[1::2]))
title, *header = header.split('\n', maxsplit=1)
header = header[0] if len(header) else ''
return {
'title': title.removeprefix('# '),
'header': parse_header(header),
'entries': [parse_entry(e) for e in entries],
}
def generate_day(page):
result = f'# {page["title"]}'
result += generate_header(page['header'])
for entry in page['entries']:
result += generate_entry(entry)
result += '\n'
return result
### COMMAND UTILS
def import_journal(fpath):
result = { 'days': {}, 'files': {} }
for name in list(sorted(fpath.glob('*.md'))):
day = parse_day(name.read_text())
result['days'][name.stem] = day
for fname in ['habits', 'godword', 'tasks', 'foods']:
result['files'][fname] = (fpath / fname).read_text()
return result
def export_journal(journal, fpath):
for day in journal['days'].values():
(fpath / (day['title'] + '.md')).write_text(generate_day(day))
for fname, content in journal['files'].items():
(fpath / fname).write_text(content)
def backup_journal():
print('Creating backup...')
tmpdir = mkdtemp()
journal = load_journal()
export_journal(journal, Path(tmpdir))
copyfile(str(JOURNAL_PATH), tmpdir + '/journal.json')
files = Path(tmpdir).glob('*')
current_date = datetime.now().strftime('%Y-%m-%d')
zipfile_path = Path(f'{current_date}.zip')
zipfile = ZipFile(zipfile_path, 'w')
for file in files:
zipfile.write(file, arcname=file.name)
rmtree(tmpdir)
if script := journal['files'].get('backup'):
print('Found script, running...')
run(['bash', '-c', script.replace('%ZIPFILE%', f"'{str(zipfile_path.absolute())}'")])
if prompt('Ran script, delete archive?'):
zipfile_path.unlink()
def open_journal(date):
journal = load_journal()
if not journal['days'].get(date):
backup_journal()
journal['days'][date] = create_day(journal, date)
tmpdir = Path(mkdtemp())
export_journal(journal, tmpdir)
while True:
try:
editor(tmpdir / f'{date}.md')
new_journal = import_journal(tmpdir)
break
except Exception as e:
print('Error:', e)
input('Press enter to try again...')
save_journal(new_journal)
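# The whole journal lives as a single JSON blob at ~/.journal.json; if it does
# not exist yet it is bootstrapped by importing the markdown pages from
# ~/workspace/journal.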
def load_journal():
if JOURNAL_PATH.exists():
return json.loads(JOURNAL_PATH.read_text())
else:
return import_journal(Path.home() / 'workspace' / 'journal')
def save_journal(journal):
JOURNAL_PATH.write_text(json.dumps(journal))
### COMMAND HANDLERS
def handle_open(args):
subcommand = nth_or_default(0, args, 'today')
if date := evaluate_time_expression(subcommand):
open_journal(format_date(date))
else:
print(f'Invalid subcommand: {subcommand}')
def handle_edit(args):
subcommand = nth_or_default(0, args, 'foods')
journal = load_journal()
if subcommand in journal['files']:
journal['files'][subcommand] = edit(journal['files'][subcommand])
elif prompt(f'Unknown file: {subcommand}, create new?'):
journal['files'][subcommand] = edit('')
save_journal(journal)
def handle_import(args):
if len(args) < 1:
print('Missing directory.')
return
path = Path(args[0])
if not path.is_dir():
print(f'Invalid directory: {path}')
return
save_journal(import_journal(path))
def handle_export(args):
if len(args) < 1:
print('Missing directory.')
return
path = Path(args[0])
if not path.is_dir():
print(f'Invalid directory: {path}')
return
export_journal(load_journal(), path)
def handle_test(args):
journal = load_journal()
journal_orig = deepcopy(journal)
for day in journal['days']:
journal['days'][day] = parse_day(generate_day(journal['days'][day]))
if journal != journal_orig:
print('Test failed!')
print('Dumping journal.fail.json and journal.fail.orig.json...')
Path('journal.fail.json').write_text(json.dumps(journal, indent=4))
Path('journal.fail.orig.json').write_text(json.dumps(journal_orig, indent=4))
else:
print('Test passed!')
def handle_summary(args):
subcommand = nth_or_default(0, args, 'today')
date = evaluate_time_expression(subcommand)
if not date:
print(f'Invalid time expression: {subcommand}')
return
date = format_date(date)
journal = load_journal()
foods, recipes = parse_foods_file(journal['files']['foods'])
daily_grams = 0.0
daily_calories = 0.0
daily_protein = 0.0
for entry in journal['days'][date]['entries']:
has_printed = False
entry_calories = 0.0
entry_protein = 0.0
for diet in (b for b in entry['blocks'] if type(b) != str and b['type'] == 'diet'):
if not has_printed:
print(f'-- {format_timestamp(entry["timestamp"])}')
has_printed = True
value = diet['amount']
name = diet['food']
if name in recipes:
food = recipes[name]
if value == 0.0:
value = food['TOTAL']
food = {k: v*(value/food['TOTAL']) for k,v in food.items()}
elif name in foods:
if value == 0.0:
value = 100
food = {k: v*(value/100.0) for k,v in foods[name].items()}
else:
print(f'ERROR: Invalid diet entry: {diet}')
continue
protein = round(food.get('Protein', 0.0), 2)
calories = round(food.get('Energy', 0.0), 2)
entry_calories += calories
entry_protein += protein
print(f'{name:<20} {value:<6}g, {calories:<6}kcal, {protein:<6}g protein')
if has_printed:
entry_calories = round(entry_calories, 2)
entry_protein = round(entry_protein, 2)
print(f'-- TOTAL: {entry_calories}kcal, {entry_protein}g protein')
print()
daily_calories += entry_calories
daily_protein += entry_protein
print(f'-- DAILY TOTAL ({daily_calories}kcal, {daily_protein}g protein)')
### MAIN
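# Usage: journal.py [open|edit|import|export|test|summary|backup] [args...]
# With no command given, 'open' is assumed and today's page is opened.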
def main():
command = nth_or_default(1, sys.argv, 'open')
args = sys.argv[2:]
handler = {
'open': handle_open,
'edit': handle_edit,
'import': handle_import,
'export': handle_export,
'test': handle_test,
'summary': handle_summary,
'backup': lambda _: backup_journal(),
}.get(command, lambda _: print(f'Invalid command: {command}'))
handler(args)
if __name__ == '__main__':
main()


@@ -1,98 +0,0 @@
from subprocess import run, Popen, DEVNULL
from datetime import datetime
from pathlib import Path
import random
import sys
import json
from common import format_timestamp
current_date = datetime.now().strftime('%Y-%m-%d')
current_time = datetime.now().strftime('%H:%M:%S')
journal_path = Path.home() / 'workspace' / 'journal'
target_page = journal_path / f'{current_date}.md'
script_path = Path(__file__).parent
habits = '\n'.join([f'[-] {x}'
for x in (journal_path / 'habits').read_text().strip().split('\n')])
words = (journal_path / 'godword').read_text().strip().split('\n')
godword = '\n'.join(' '.join(random.choice(words)
for __ in range(10)) for _ in range(2))
def parse_tasks_file():
result = []
tasks = (journal_path / 'tasks').read_text().splitlines()
for task in tasks:
days, name = task.split(':')
days = days.split(',')
result.append((days, name))
return result
tasks_file = parse_tasks_file()
if not target_page.exists():
Popen(
['bash', str(script_path / 'backup-script.sh'), current_date+'.zip'],
cwd=str(journal_path), stdout=DEVNULL, stderr=DEVNULL
)
journal = json.load(open(script_path / 'journal.json'))
notifications = []
for day, v in journal.items():
for entry in v.get('entries'):
for block in entry['blocks']:
if not isinstance(block, str) and block['type'] == 'notify':
if block['day'] == current_date:
notifications.append((
format_timestamp(entry['timestamp']),
block['message']
))
tasks = []
curr_day = {
0: 'mo', 1: 'tu', 2: 'we', 3: 'th',
4: 'fr', 5: 'sa', 6: 'su',
}[datetime.now().weekday()]
for days, name in tasks_file:
if curr_day in days:
tasks.append(name)
parts = [
f'# {target_page.stem}',
f'Godword:\n{godword}',
f'Habits:\n{habits}',
]
if notifications:
notifications_rendered = '\n'.join(
f'[[{entry}]] {message}'
for entry, message in notifications
)
parts.append(f'Notifications:\n{notifications_rendered}')
if tasks:
tasks_rendered = '\n'.join(
f'[-] {task}'
for task in tasks
)
parts.append(f'Tasks:\n{tasks_rendered}')
header = '\n\n'.join(parts) + '\n'
target_page.write_text(header)
with open(target_page, 'a') as fp:
fp.write(f'\n{current_date} {current_time} ')
run(['nvim', str(target_page), '+'])

parse.py

@@ -1,324 +0,0 @@
from pathlib import Path
from datetime import datetime
import re
import json
entry_re = re.compile(r'^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) ?', re.MULTILINE)
curr_day = ''
def parse_godword(godword):
return godword.split()
def parse_habits(habits):
result = {}
for habit in habits.splitlines():
value, name = habit.split(maxsplit=1)
name = name.strip()
result[name] = value[1] == 'x'
return result
def parse_notifications(notifications):
result = []
for notification in notifications.splitlines():
parts = notification.split()
result.append({
'source': ' '.join(parts[0:2]).strip('[]'),
'message': ' '.join(parts[2:]),
})
return result
def parse_tasks(tasks):
result = {}
for task in tasks.splitlines():
value, name = task.split(maxsplit=1)
name = name.strip()
result[name] = value[1] == 'x'
return result
header_modules = {
'godword': parse_godword,
'habits': parse_habits,
'notifications': parse_notifications,
'tasks': parse_tasks,
}
def parse_header(header):
result = {}
def split_into_blocks(text):
return [b.strip() for b in re.split(r'\n{2,}', text) if b.strip() != '']
title, *modules = split_into_blocks(header)
for module in modules:
name, value = module.split('\n', maxsplit=1)
name = name.lower().removesuffix(':')
result[name] = header_modules[name](value)
return result
def parse_timestamp(timestamp):
return datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')
def parse_post(block):
block = block.removeprefix('@post').strip()
try:
timestamp = int(parse_timestamp(block[:19]).timestamp())
block = block[19:]
except:
timestamp = None
content = block.strip()
result = {}
if content:
result['content'] = content
if timestamp:
result['timestamp'] = timestamp
return result
def parse_notes(block):
tag, source, title = block.splitlines()
return {'source': source, 'title': title}
def parse_diet(block):
tag, amount, food = block.split()
amount = int(amount.removesuffix('g'))
return {'amount': amount, 'food': food}
def parse_timer(block):
tag, *rest = block.split()
name = None
timestamp = None
if len(rest) > 2:
name, *rest = rest
if len(rest) > 1:
timestamp = int(parse_timestamp(' '.join(rest)).timestamp())
result = {}
if name:
result['name'] = name
if timestamp:
result['timestamp'] = timestamp
return result
def parse_exercise(block):
tag, *parts = block.split()
if parts[0] == 'walk':
kind, minutes, distance, steps = parts
return {
'kind': kind,
'minutes': int(minutes.removesuffix('min')),
'distance': float(distance.removesuffix('km')),
'steps': int(steps.removesuffix('steps')),
}
elif parts[0] == 'calisthenics':
kind, split, exercise = parts
sets, reps = split.split('x')
return {
'kind': kind,
'reps': reps,
'sets': sets,
'exercise': exercise,
}
assert False
def parse_notify(block):
tag, day, *rest = block.split()
return {'day': day.strip(), 'message': ' '.join(rest)}
def create_entry_module_parser(name, handler=None):
handler = handler or (lambda b: {'value': b.removeprefix(f'@{name} ')})
return lambda b: {'type': name} | handler(b)
entry_modules = {
'hide': create_entry_module_parser('hide', lambda _: {}),
'post': create_entry_module_parser('post', parse_post),
'info': create_entry_module_parser('info'),
'notes': create_entry_module_parser('notes', parse_notes),
'behavior': create_entry_module_parser('behavior'),
'diet': create_entry_module_parser('diet', parse_diet),
'task': create_entry_module_parser('task'),
'start': create_entry_module_parser('start', parse_timer),
'stop': create_entry_module_parser('stop', parse_timer),
'done': create_entry_module_parser('done', parse_timer),
'exercise': create_entry_module_parser('exercise', parse_exercise),
'notify': create_entry_module_parser('notify', parse_notify),
}
from functools import reduce, partial
def split_keep(delims, string):
res = []
buf = []
for c in string:
if c in delims:
if buf:
res.append(''.join(buf))
res.append(c)
buf = []
else:
buf.append(c)
if buf:
res.append(''.join(buf))
return res
assert split_keep(['@', '\n'], 'hello @world\n\nabout') == ['hello ', '@', 'world', '\n', '\n', 'about']
def merge_chars(chars, l):
res = []
for i in l:
if i in chars and res and all(c == i for c in res[-1]):
res[-1] += i
else:
res.append(i)
return res
assert merge_chars('\n', ['\n', '\n', 'hello', 'world', '\n', '\n']) == ['\n\n', 'hello', 'world', '\n\n']
def attach_to_next(c, l):
l = l.copy()
try:
while True:
i = l.index(c)
l[i+1] = c + l[i+1]
l.pop(i)
except:
pass
return l
assert attach_to_next('@', ['aoeu', '@', 'oeu']) == ['aoeu', '@oeu']
def attach_to_prev_if(pred, l):
res = []
for i, curr in enumerate(l):
prev = l[i-1] if i-1 >= 0 else None
if prev and pred(prev, curr):
res[-1] += curr
else:
res.append(curr)
return res
assert attach_to_prev_if(lambda p, c: p[-1] != '\n' and c[0] == '@', ['aoeu', '@oeu']) == ['aoeu@oeu']
def merge_notes_block(l):
res = []
i = 0
while i < len(l):
if l[i] == '@notes':
# notes nl source nl title
res.append('\n'.join([l[i], l[i+2], l[i+4]]))
i += 5
else:
res.append(l[i])
i += 1
return res
def merge_wrapped_lines(l):
res = []
i = 0
while i < len(l):
curr = l[i]
prev = l[i-1] if i > 0 else None
next = l[i+1] if i+1 < len(l) else None
if prev and next and curr == '\n':
len_prev = len(prev)
if i == 1:
len_prev += len('2020-02-02 02:02:02 ')
if not next[0].isspace():
next_word = next.split()[0]
if len_prev + len(next_word) >= 80:
res[-1] += ' ' + next
i += 2
continue
res.append(curr)
i += 1
return res
def apply(f, x):
return f(x)
def flip(f):
return lambda a1, a2: f(a2, a1)
def parse_entry(entry):
result = {}
def split_into_blocks(text):
r = reduce(flip(apply), [
# split the text into sections by newline and tag symbol, keeping the separators
partial(split_keep, ('\n', '@')),
# merge sequential newlines together into a single whitespace block
partial(merge_chars, '\n'),
# attach escaped tag symbols
partial(attach_to_prev_if, lambda p, c: c == '@' and p[-1] == '\\'),
# merge the escaped tag with the text that follows it
partial(attach_to_prev_if, lambda p, c: p.endswith('\\@')),
# attach tag symbols
partial(attach_to_next, '@'),
# attach tags which do not come after a newline or another tag
partial(attach_to_prev_if, lambda p, c: p[-1] != '\n' and not (p[0] == '@' and p[-1] == ' ') and c[0] == '@'),
# merge notes blocks (tag, source and title lines) into a single block
merge_notes_block,
# strip all non-whitespace blocks
partial(map, lambda s: s if s.isspace() else s.rstrip()), list,
# merge wrapped lines back together
merge_wrapped_lines,
# remove trailing whitespace block
lambda b: b if b and not all(c == '\n' for c in b[-1]) else b[:-1],
], text)
return r
timestamp, content = entry
result['timestamp'] = int(parse_timestamp(timestamp.strip()).timestamp())
result['blocks'] = []
for b in split_into_blocks(content):
if b.startswith('@'):
tag = b.split()[0][1:]
result['blocks'].append(entry_modules[tag](b))
else:
result['blocks'].append(b)
return result
def parse_page(text):
header, *tmp = entry_re.split(text)
entries = list(zip(tmp[::2], tmp[1::2]))
return {
'header': parse_header(header),
'entries': [parse_entry(e) for e in entries],
}
if __name__ == '__main__':
result = {}
for fpath in list(sorted((Path.home() / 'workspace' / 'journal').glob('*.md'))):
day = parse_page(fpath.read_text())
result[fpath.stem] = day
script_path = Path(__file__).parent
with open(script_path / 'journal.json', 'w') as fp:
json.dump(result, fp, indent=4, ensure_ascii=False)


@@ -1,77 +0,0 @@
import sys
from collections import defaultdict
from datetime import datetime, timedelta
import math
from common import parse_timestamp
content = open(sys.argv[1]).read().strip()
lines = content.splitlines()
current_chapter = ''
total_chapters = 0
completed_chapters = set()
today = datetime.now().replace(hour=0,minute=0,second=0,microsecond=0)
this_week = today - timedelta(days=7)
total_hours = 0.0
day_hours = 0.0
week_hours = 0.0
oldest_timestamp = datetime.now()
i = 0
while i < len(lines):
line = lines[i].strip()
if line.startswith('#'):
current_chapter = line[line.find(' ')+1:]
total_chapters += 1
else:
completed_chapters.add(current_chapter)
if line.startswith('@start'):
start = parse_timestamp(line.removeprefix('@start '))
if start < oldest_timestamp:
oldest_timestamp = start
i += 1
line = lines[i].strip()
end = parse_timestamp(line.removeprefix('@stop '))
delta = end - start
hours = delta.total_seconds() / 3600
total_hours += hours
if start > this_week:
week_hours += hours
if start > today:
day_hours += hours
i += 1
completed_chapters = len(completed_chapters)
num_days = (datetime.now() - oldest_timestamp).days or 1
hours_per_day = total_hours / num_days
hours_per_chapter = total_hours / completed_chapters
hours_to_completion = hours_per_chapter * (total_chapters - completed_chapters)
days_to_completion = math.ceil(hours_to_completion / hours_per_day)
completion_date = datetime.now() + timedelta(days=days_to_completion)
completion_percentage = completed_chapters/total_chapters*100
print(f'Started on: {oldest_timestamp.strftime("%Y-%m-%d")}')
print(f'Progress: [{completed_chapters}/{total_chapters}] ({round(completion_percentage, 2)}%)')
print(f'Total: {round(total_hours, 2)}h')
print(f'Week: {round(week_hours, 2)}h')
print(f'Day: {round(day_hours, 2)}h')
print(f'Hours per day: {round(hours_per_day, 2)}h')
print(f'Hours to completion: {round(hours_to_completion, 2)}h')
print(f'Completion date: {completion_date.strftime("%Y-%m-%d")}')


@@ -1,25 +0,0 @@
from subprocess import run
import sys
import json
from common import format_timestamp
journal = json.load(open('journal.json'))
matches = []
keyword = sys.argv[1].lower()
for day, obj in journal.items():
for entry in obj['entries']:
for block in entry['blocks']:
if isinstance(block, str):
if keyword in block.lower().split():
matches.append((format_timestamp(entry['timestamp']), block))
buf = ''
for (ts, c) in matches:
c = c.replace('\n', ' ').strip()
buf += (f'[[{ts}]] {c}')[:80] + '\n'
run(['nvim', '-'], input=buf.encode('utf-8'))


@@ -1,63 +0,0 @@
import json
import sys
from common import parse_foods_file, format_timestamp
foods, recipes = parse_foods_file()
do_yesterday = len(sys.argv) > 1
day_index = -2 if do_yesterday else -1
journal = json.load(open('journal.json'))
current_day = list(journal)[day_index]
daily_grams = 0.0
daily_calories = 0.0
daily_protein = 0.0
for entry in journal[current_day]['entries']:
has_printed = False
entry_calories = 0.0
entry_protein = 0.0
for diet in (b for b in entry['blocks'] if type(b) != str and b['type'] == 'diet'):
if not has_printed:
print(f'-- {format_timestamp(entry["timestamp"])}')
has_printed = True
value = diet['amount']
name = diet['food']
if name in recipes:
food = recipes[name]
if value == 0.0:
value = food['TOTAL']
food = {k: v*(value/food['TOTAL']) for k,v in food.items()}
elif name in foods:
if value == 0.0:
value = 100
food = {k: v*(value/100.0) for k,v in foods[name].items()}
else:
print(f'ERROR: Invalid diet entry: {diet}')
continue
protein = round(food.get('Protein', 0.0), 2)
calories = round(food.get('Energy', 0.0), 2)
entry_calories += calories
entry_protein += protein
print(f'{name:<20} {value:<6}g, {calories:<6}kcal, {protein:<6}g protein')
if has_printed:
entry_calories = round(entry_calories, 2)
entry_protein = round(entry_protein, 2)
print(f'-- TOTAL: {entry_calories}kcal, {entry_protein}g protein')
print()
daily_calories += entry_calories
daily_protein += entry_protein
print(f'-- DAILY TOTAL ({daily_calories}kcal, {daily_protein}g protein)')

test.py

@@ -1,13 +0,0 @@
from pathlib import Path
from generate import generate_page
from parse import parse_page
for fpath in list(sorted((Path.home() / 'workspace' / 'journal').glob('*.md'))):
text = fpath.read_text()
parsed = parse_page(text)
generated = generate_page(fpath.stem, parsed['header'], parsed['entries'])
for i, (l1, l2) in enumerate(zip(text.split('\n'), generated.split('\n'))):
if l1 != l2:
print('\n'.join([f'LINE NR: {i}\t\tFILE: {fpath.stem}', repr(l1), repr(l2)]))
breakpoint()