Compare commits

`d4067f5dca` ... `master` (15 commits)

| SHA1 |
|---|
| `dcafd88b86` |
| `3ef24d2bfe` |
| `468701b220` |
| `22d1150801` |
| `1e352c7b06` |
| `02df9b9b5f` |
| `6181fa29ea` |
| `26232c60ae` |
| `954e0c12af` |
| `760d072982` |
| `7e72871904` |
| `ad9ead66d0` |
| `58f1aad525` |
| `96dda8ec5e` |
| `bf589d94b5` |
**.gitignore** (vendored) — 5 lines removed (`@@ -1,5 +0,0 @@`)

```
**/*.csv
.ipynb_checkpoints/
__pycache__/
diet
journal.json
```
**analyze.py** — 111 lines removed (`@@ -1,111 +0,0 @@`)

```python
from collections import Counter
from common import parse_foods_file, evaluate_food_entry
import string
import sys
import json

journal = json.load(open('journal.json'))

foods, recipes = parse_foods_file()

if len(sys.argv) > 1:
    value, name = sys.argv[1:]
    value = float(value.removesuffix('g'))

    from pprint import pprint
    pprint(evaluate_food_entry(foods, recipes, value, name))

    exit(0)

total_entries = 0
total_words = 0
word_frequency = Counter()

total_csv = [['day', 'entries', 'words']]
daily_csv = [['day', 'entries', 'words', 'calories', 'carbs', 'fat', 'protein',
              'sugar']]
entry_csv = [['timestamp', 'words']]
words_csv = [['word', 'count']]

diet_csv = [[
    'timestamp', 'name', 'grams', 'calories', 'carbs', 'fat', 'protein',
    'saturated_fat', 'sugar', 'fiber'
]]

for day, obj in journal.items():
    daily_entries = len(obj['entries'])
    daily_words = 0
    daily_calories = 0.0
    daily_protein = 0.0
    daily_carbs = 0.0
    daily_fat = 0.0
    daily_sugar = 0.0

    for entry in obj['entries']:
        for block in entry['blocks']:
            if isinstance(block, str):
                words = ''.join(
                    c if c in string.ascii_letters+"'" else ' '
                    for c in block.lower()
                ).split()

                word_frequency.update(words)

                entry_words = len(words)
                daily_words += entry_words
            elif block['type'] == 'diet':
                name = block['food']
                value = block['amount']
                food = evaluate_food_entry(foods, recipes, value, name)

                diet_csv.append((
                    entry['timestamp'],
                    name,
                    value,
                    round(food.get('Energy', 0.0), 2),
                    round(food.get('Carbs', 0.0), 2),
                    round(food.get('Fat', 0.0), 2),
                    round(food.get('Protein', 0.0), 2),
                    round(food.get('SaturatedFat', 0.0), 2),
                    round(food.get('Sugar', 0.0), 2),
                    round(food.get('Fiber', 0.0), 2),
                ))

                daily_calories += food.get('Energy', 0.0)
                daily_protein += food.get('Protein', 0.0)
                daily_fat += food.get('Fat', 0.0)
                daily_carbs += food.get('Carbs', 0.0)
                daily_sugar += food.get('Sugar', 0.0)

        entry_csv.append([entry['timestamp'], entry_words])

    daily_macros = daily_protein + daily_fat + daily_carbs

    daily_csv.append([
        day,
        daily_entries,
        daily_words,
        round(daily_calories, 2),
        round(100 * (daily_carbs / daily_macros) if daily_carbs else 0, 2),
        round(100 * (daily_fat / daily_macros) if daily_fat else 0, 2),
        round(100 * (daily_protein / daily_macros) if daily_protein else 0, 2),
        round(daily_protein, 2),
        round(daily_sugar, 2)
    ])

    total_entries += daily_entries
    total_words += daily_words

total_csv.append([day, total_entries, total_words])

words_csv += word_frequency.most_common()

def write_csv(fname, csv):
    with open(fname, 'w') as fp:
        fp.write('\n'.join(','.join(str(x) for x in row) for row in csv))

write_csv('data/total.csv', total_csv)
write_csv('data/daily.csv', daily_csv)
write_csv('data/entry.csv', entry_csv)
write_csv('data/words.csv', words_csv)
write_csv('data/diet.csv', diet_csv)
```
**Deleted shell script** (filename not shown in the capture) — 5 lines removed (`@@ -1,5 +0,0 @@`)

```bash
#!/bin/bash
apack $1 *.md
rclone copy $1 gdrive:/journal-backup/
rclone copy $1 prodesk:/home/olari/journal-backup/
rm $1
```
**common.py** — 107 lines removed (`@@ -1,107 +0,0 @@`)

```python
from pathlib import Path
from datetime import datetime

def parse_timestamp(timestamp):
    return datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')

def format_timestamp(timestamp):
    return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')

def parse_foods_file():
    path = Path.home() / 'workspace' / 'journal' / 'foods'
    text = path.read_text()
    foods_str, recipes_str = text.split('---')

    def parse_macro(macro):
        if macro == '...':
            return ('INVALID', 0.0)

        name, value = macro.split()
        value = float(value.removesuffix('g').removesuffix('kcal'))
        return (name, value)

    foods = {
        macros[0]: dict(parse_macro(macro) for macro in macros[1:])
        for macros in [food.split('\n') for food in foods_str.strip().split('\n\n')]
    }

    def combine_values(fst, snd):
        result = fst.copy()
        for k,v in snd.items():
            if k in fst:
                result[k] += v
            else:
                result[k] = v
        return result

    recipes = {}

    def evaluate_ingredients(ingredients):
        result = {}

        total_weight = 0.0
        for ingredient in ingredients:
            k,v = parse_macro(ingredient)
            if k == 'TOTAL':
                result[k] = v
                continue
            else:
                total_weight += v

            food = foods.get(k)
            total = 100.0
            if not food:
                food = recipes[k].copy()
                total = food['TOTAL']
                del food['TOTAL']

            for kk,vv in food.items():
                if kk not in result:
                    result[kk] = 0.0

                result[kk] += vv * (v/total)

        if 'TOTAL' not in result:
            result['TOTAL'] = total_weight

        return result

    for ingredients in [recipe.split('\n') for recipe in recipes_str.strip().split('\n\n')]:
        recipes[ingredients[0]] = evaluate_ingredients(ingredients[1:])

    def get_calories_from_macros(mm):
        calories = 0.0
        for k,v in mm.items():
            calories += v * {
                'Carbs': 4,
                'Fat': 9,
                'Protein': 4
            }.get(k, 0.0)
        return calories

    #for k,v in foods.items():
    #    print(round(v.get('Energy') - get_calories_from_macros(v)), k)

    return foods, recipes

def evaluate_food_entry(foods, recipes, value, name):
    if name in recipes:
        food = recipes[name]

        if value == 0.0:
            value = food['TOTAL']

        food = {k: v*(value/food['TOTAL']) for k,v in food.items()}
    elif name in foods:
        if value == 0.0:
            value = 100

        food = {k: v*(value/100.0) for k,v in foods[name].items()}
    else:
        breakpoint()
        print(f'ERROR: Invalid diet entry: {name}')
        assert False

    return food
```
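The layout of the `foods` file is only implied by `parse_foods_file` above: blank-line-separated food blocks (a name line followed by `Macro 12g` / `Energy 52kcal` lines), a `---` separator, then recipe blocks whose ingredient lines reuse the same `Name 150g` syntax plus an optional `TOTAL`. The sketch below is a minimal illustration under that assumption; the food names and numbers are invented, and it assumes the pre-refactor `common.py` from this diff is still importable.

```python
# Assumed shape of ~/workspace/journal/foods (inferred from parse_foods_file):
#
#   Apple
#   Energy 52kcal
#   Carbs 14g
#   Sugar 10g
#   Protein 0.3g
#   Fat 0.2g
#
#   Oats
#   Energy 389kcal
#   Carbs 66g
#   Protein 17g
#   Fat 7g
#   ---
#   Porridge
#   Oats 60g
#   Apple 150g

from common import evaluate_food_entry  # pre-refactor module shown in this diff

# Hand-built dicts shaped like parse_foods_file()'s output:
# foods are per 100 g, recipes are absolute macros plus their TOTAL weight.
foods = {
    'Apple': {'Energy': 52.0, 'Carbs': 14.0, 'Sugar': 10.0, 'Protein': 0.3, 'Fat': 0.2},
    'Oats':  {'Energy': 389.0, 'Carbs': 66.0, 'Protein': 17.0, 'Fat': 7.0},
}
recipes = {
    # "Porridge" = Oats 60g + Apple 150g, evaluated the way evaluate_ingredients would.
    'Porridge': {'Energy': 311.4, 'Carbs': 60.6, 'Protein': 10.65, 'Fat': 4.5,
                 'Sugar': 15.0, 'TOTAL': 210.0},
}

# 150 g of a plain food scales the per-100 g values.
print(evaluate_food_entry(foods, recipes, 150, 'Apple'))
# An amount of 0 for a recipe means "the whole recipe" (falls back to TOTAL).
print(evaluate_food_entry(foods, recipes, 0.0, 'Porridge'))
```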
**compile.py** — 28 lines removed (`@@ -1,28 +0,0 @@`)

```python
from subprocess import run
import sys
import json

from common import format_timestamp

journal = json.load(open('journal.json'))

matches = []
keywords = sys.argv[1:]

for day, obj in journal.items():
    for entry in obj['entries']:
        for block in entry['blocks']:
            if isinstance(block, str):
                words = block.lower().split()
                if any(kw in words for kw in keywords):
                    matches.append((
                        format_timestamp(entry['timestamp']),
                        '\n\n'.join([b for b in entry if isinstance(b, str)])
                    ))

buf = ''

for (ts, c) in matches:
    buf += f'{ts}\n\n{c}\n\n'

run(['nvim', '-'], input=buf.encode('utf-8'))
```
**Deleted script** (filename not shown in the capture; prints a PDF outline via `mutool`) — 24 lines removed (`@@ -1,24 +0,0 @@`)

```python
from subprocess import run
import sys

outline = run(
    ['mutool', 'show', sys.argv[1], 'outline'],
    capture_output=True
).stdout.decode('utf-8')

indent = 0
last_quote_index = 0
for line in outline.splitlines():
    quote_index = line.find('"')
    hash_index = line.find('#')

    if quote_index > last_quote_index:
        indent += 1
    elif quote_index < last_quote_index:
        indent -= 1
    last_quote_index = quote_index

    title = line[quote_index+1:line.find('"', quote_index+1)].strip()
    page = int(line[hash_index+1:line.find(',', hash_index+1)])

    print(f'{"#"*indent} {title} ({page})')
```
**generate.py** — 65 lines removed (`@@ -1,65 +0,0 @@`)

```python
from common import format_timestamp
import json

def generate_godword(value):
    return f"{' '.join(value[:10])}\n{' '.join(value[10:])}"

def generate_habits(value):
    return '\n'.join(f'[{"x" if v else "-"}] {k}' for k,v in value.items())

header_modules = {
    'godword': generate_godword,
    'habits': generate_habits,
}

def generate_diet(block):
    _, amount, food = block.values()
    return f'@diet {amount}g {food}'

def generate_exercise(block):
    if block['kind'] == 'walk':
        return f'@exercise {block["minutes"]}min {block["distance"]}km {block["steps"]}steps'

def generate_default(block):
    return f'@{block["type"]} {block["value"]}'

def generate_timer(block):
    return f'@{block["type"]} {format_timestamp(block["timestamp"])}'

entry_modules = {
    'diet': generate_diet,
    'exercise': generate_exercise,
    'hide': lambda _: '@hide',
    'post': generate_timer,
}

journal = json.load(open('journal.json'))

for curr_day in journal:
    header, entries = journal[curr_day].values()

    result = f'# {curr_day}\n'

    for name, value in header.items():
        result += f'\n{name.title()}:\n'
        result += header_modules[name](value)
        result += '\n'

    def format_block(block):
        if isinstance(block, str):
            return block
        else:
            return entry_modules[block['type']](block)

    for entry in entries:
        result += f'\n{format_timestamp(entry["timestamp"])} '
        if len(entry['blocks']) == 1:
            result += f'{format_block(entry["blocks"][0])}\n'
        else:
            result += '\n'
            for block in entry['blocks']:
                result += f'\n{format_block(block)}\n'

    print(result)
```
**journal.py** — new file, 1065 lines (file diff suppressed because it is too large)
**migrations/2021-06-27_post.py** — new file, 34 lines (`@@ -0,0 +1,34 @@`)

```python
from copy import deepcopy
from pathlib import Path
from shutil import copy
import json

journal_path = Path.home() / '.journal.json'

copy(str(journal_path), str(journal_path.with_suffix('.bkp')))

journal = json.loads(journal_path.read_text())
new_journal = deepcopy(journal)

for day in journal['days']:
    new_entries = []
    for entry in journal['days'][day]['entries']:
        new_blocks = []
        for block in entry['blocks']:
            if not isinstance(block, str) and block['type'] == 'post':
                new_blocks.append({
                    'type': 'post',
                    'timestamp': block.get('timestamp', entry['timestamp'] + 30)
                })

                if content := block.get('content'):
                    new_blocks.append(content)
            else:
                new_blocks.append(block)

        entry['blocks'] = new_blocks
        new_entries.append(entry)

    new_journal['days'][day]['entries'] = new_entries

journal_path.write_text(json.dumps(new_journal))
```
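For reference, the migration above splits an inline `post` block into a bare timer block (with an existing or synthesized timestamp) plus a plain text block. A rough before/after sketch with invented field values:

```python
# Hypothetical entry shaped like the journal's {'timestamp': ..., 'blocks': [...]}.
before = {
    'timestamp': 1624800000,
    'blocks': [{'type': 'post', 'content': 'Published the weekly note.'}],
}

# What the migration produces: the post keeps (or gets a default) timestamp,
# and its text content becomes a separate string block.
after = {
    'timestamp': 1624800000,
    'blocks': [
        {'type': 'post', 'timestamp': 1624800000 + 30},
        'Published the weekly note.',
    ],
}
```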
**migrations/2021-06-29_tag.py** — new file, 34 lines (`@@ -0,0 +1,34 @@`)

```python
from copy import deepcopy
from pathlib import Path
from shutil import copy
import json

journal_path = Path.home() / '.journal.json'

copy(str(journal_path), str(journal_path.with_suffix('.bkp')))

journal = json.loads(journal_path.read_text())
new_journal = deepcopy(journal)

for day in journal['days']:
    new_entries = []
    for entry in journal['days'][day]['entries']:
        new_blocks = []
        for block in entry['blocks']:
            if not isinstance(block, str) and block['type'] == 'hide':
                if len(new_blocks) and not isinstance(new_blocks[-1], str) and \
                        new_blocks[-1]['type'] != 'tag':
                    new_blocks.append({'type': 'tag', 'value': ['hide']})
            elif not isinstance(block, str) and block['type'] == 'info':
                new_blocks.append({'type': 'tag', 'value': ['info']})
                new_blocks.append('\n')
                new_blocks.append(block['value'])
            else:
                new_blocks.append(block)

        entry['blocks'] = new_blocks
        new_entries.append(entry)

    new_journal['days'][day]['entries'] = new_entries

journal_path.write_text(json.dumps(new_journal))
```
**migrations/2021-07-06_fix.py** — new file, 16 lines (`@@ -0,0 +1,16 @@`)

```python
from copy import deepcopy
from pathlib import Path
from shutil import copy
import json

journal_path = Path.home() / '.journal.json'

copy(str(journal_path), str(journal_path.with_suffix('.bkp')))

journal = json.loads(journal_path.read_text())
new_journal = deepcopy(journal)

for day in journal['days']:
    new_journal['days'][day]['entries'] = journal['days'][day]['entries'][0]

journal_path.write_text(json.dumps(new_journal))
```
**Deleted script** (filename not shown in the capture; creates the day's journal page) — 66 lines removed (`@@ -1,66 +0,0 @@`)

```python
from subprocess import run, Popen, DEVNULL
from datetime import datetime
from pathlib import Path
import random
import sys
import json

from common import format_timestamp

current_date = datetime.now().strftime('%Y-%m-%d')
current_time = datetime.now().strftime('%H:%M:%S')

journal_path = Path.home() / 'workspace' / 'journal'

target_page = journal_path / f'{current_date}.md'

script_path = Path(__file__).parent

habits = '\n'.join([f'[-] {x}'
    for x in (journal_path / 'habits').read_text().strip().split('\n')])

words = (journal_path / 'godword').read_text().strip().split('\n')
godword = '\n'.join(' '.join(random.choice(words)
    for __ in range(10)) for _ in range(2))

if not target_page.exists():
    Popen(
        ['bash', str(script_path / 'backup-script.sh'), current_date+'.zip'],
        cwd=str(journal_path), stdout=DEVNULL, stderr=DEVNULL
    )

    journal = json.load(open(script_path / 'journal.json'))

    notifications = []

    for day, v in journal.items():
        for entry in v.get('entries'):
            for block in entry['blocks']:
                if not isinstance(block, str) and block['type'] == 'notify':
                    if block['day'] == current_date:
                        notifications.append((
                            format_timestamp(entry['timestamp']),
                            block['message']
                        ))

    parts = [
        f'# {target_page.stem}',
        f'Godword:\n{godword}',
        f'Habits:\n{habits}',
    ]

    if notifications:
        notifications_rendered = '\n'.join(
            f'[[{entry}]] {message}'
            for entry, message in notifications
        )

        parts.append(f'Notifications:\n{notifications_rendered}')

    header = '\n\n'.join(parts) + '\n'
    target_page.write_text(header)

with open(target_page, 'a') as fp:
    fp.write(f'\n{current_date} {current_time} ')

run(['nvim', str(target_page), '+'])
```
**parse.py** — 188 lines removed (`@@ -1,188 +0,0 @@`)

```python
from pathlib import Path
from datetime import datetime
import re
import json

entry_re = re.compile(r'^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', re.MULTILINE)

curr_day = ''


def parse_godword(godword):
    return godword.split()

def parse_habits(habits):
    result = {}
    for habit in habits.splitlines():
        value, name = habit.split(maxsplit=1)
        name = name.strip()
        result[name] = value[1] == 'x'
    return result

def parse_notifications(notifications):
    result = []
    for notification in notifications.splitlines():
        parts = notification.split()
        result.append({
            'source': ' '.join(parts[0:2]).strip('[]'),
            'message': ' '.join(parts[2:]),
        })
    return result


header_modules = {
    'godword': parse_godword,
    'habits': parse_habits,
    'notifications': parse_notifications,
}

def parse_header(header):
    result = {}

    def split_into_blocks(text):
        return [b.strip() for b in re.split(r'\n{2,}', text) if b.strip() != '']

    title, *modules = split_into_blocks(header)

    for module in modules:
        name, value = module.split('\n', maxsplit=1)
        name = name.lower().removesuffix(':')
        result[name] = header_modules[name](value)

    return result

def parse_timestamp(timestamp):
    return datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')

def parse_post(block):
    block = block.removeprefix('@post ')
    try:
        timestamp = int(parse_timestamp(block[:19]).timestamp())
        block = block[19:]
    except:
        timestamp = None

    content = block.strip()

    result = {}
    if content:
        result['content'] = content
    if timestamp:
        result['timestamp'] = timestamp
    return result

def parse_notes(block):
    tag, source, title = block.splitlines()
    return {'source': source, 'title': title}

def parse_diet(block):
    tag, amount, food = block.split()
    amount = int(amount.removesuffix('g'))
    return {'amount': amount, 'food': food}

def parse_timer(block):
    tag, *rest = block.split()

    name = None
    timestamp = None
    if len(rest) > 2:
        name, *rest = rest
    if len(rest) > 1:
        timestamp = int(parse_timestamp(' '.join(rest)).timestamp())

    result = {}
    if name:
        result['name'] = name
    if timestamp:
        result['timestamp'] = timestamp
    return result

def parse_exercise(block):
    tag, *parts = block.split()

    if parts[0] == 'walk':
        kind, minutes, distance, steps = parts
        return {
            'kind': kind,
            'minutes': int(minutes.removesuffix('min')),
            'distance': float(distance.removesuffix('km')),
            'steps': int(steps.removesuffix('steps')),
        }

    return {'kind': 'INVALID'}

def parse_notify(block):
    tag, day, *rest = block.split()

    return {'day': day.strip(), 'message': ' '.join(rest)}

def create_entry_module_parser(name, handler=None):
    handler = handler or (lambda b: {'value': b.removeprefix(f'@{name} ')})
    return lambda b: {'type': name} | handler(b)

entry_modules = {
    'hide': create_entry_module_parser('hide', lambda _: {}),
    'post': create_entry_module_parser('post', parse_post),
    'info': create_entry_module_parser('info'),
    'notes': create_entry_module_parser('notes', parse_notes),
    'behavior': create_entry_module_parser('behavior'),
    'diet': create_entry_module_parser('diet', parse_diet),
    'task': create_entry_module_parser('task'),
    'start': create_entry_module_parser('start', parse_timer),
    'stop': create_entry_module_parser('stop', parse_timer),
    'done': create_entry_module_parser('done', parse_timer),
    'exercise': create_entry_module_parser('exercise', parse_exercise),
    'notify': create_entry_module_parser('notify', parse_notify),
}

def parse_entry(entry):
    result = {}

    def split_into_blocks(text):
        result = []

        for block in re.split(r'\n{2,}', text):
            block = block.strip()
            if not block:
                continue

            for i, module in enumerate(block.replace(' @', '\n@').split('\n@')):
                #module = module.strip().replace('\n', ' ')
                if i == 0:
                    result.append(module)
                else:
                    result.append('@'+module)

        return result

    timestamp, content = entry

    result['timestamp'] = int(parse_timestamp(timestamp.strip()).timestamp())
    result['blocks'] = []

    for b in split_into_blocks(content):
        if b[0] == '@':
            tag = b.split()[0][1:]
            result['blocks'].append(entry_modules[tag](b))
        else:
            result['blocks'].append(b)

    return result

result = {}

for fpath in list(sorted((Path.home() / 'workspace' / 'journal').glob('*.md'))):
    curr_day = fpath.stem

    header, *tmp = entry_re.split(fpath.read_text())
    entries = list(zip(tmp[::2], tmp[1::2]))

    result[fpath.stem] = {
        'header': parse_header(header),
        'entries': [parse_entry(e) for e in entries],
    }

script_path = Path(__file__).parent

with open(script_path / 'journal.json', 'w') as fp:
    json.dump(result, fp, indent=4, ensure_ascii=False)
```
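The day-page format that parse.py consumes is easiest to see from a sample. The sketch below is a minimal, self-contained illustration with made-up content (timestamps, habits, godword words, and diet amounts are all invented); it only demonstrates how `entry_re` splits a page into a header plus `(timestamp, body)` pairs, exactly as the loop at the bottom of parse.py does.

```python
import re

entry_re = re.compile(r'^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', re.MULTILINE)

page = """# 2021-07-06

Godword:
river lamp ember quiet forge salt mirror wren gold hinge
cedar knot prism vale drift moss anchor flint reed spark

Habits:
[x] exercise
[-] reading

2021-07-06 08:12:03 @diet 150g Apple

2021-07-06 09:30:00 Started the morning with a walk.

@exercise walk 35min 3.1km 4200steps
"""

# Same splitting scheme as parse.py: everything before the first timestamp is
# the header, then timestamps and bodies alternate.
header, *rest = entry_re.split(page)
entries = list(zip(rest[::2], rest[1::2]))

print(header.strip().splitlines()[0])        # '# 2021-07-06'
for timestamp, body in entries:
    print(timestamp, '->', body.strip().split('\n\n'))
```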
**progress.py** — 77 lines removed (`@@ -1,77 +0,0 @@`)

```python
import sys
from collections import defaultdict
from datetime import datetime, timedelta
import math

from common import parse_timestamp

content = open(sys.argv[1]).read().strip()

lines = content.splitlines()
current_chapter = ''

total_chapters = 0
completed_chapters = set()

today = datetime.now().replace(hour=0,minute=0,second=0,microsecond=0)
this_week = today - timedelta(days=7)

total_hours = 0.0
day_hours = 0.0
week_hours = 0.0

oldest_timestamp = datetime.now()

i = 0
while i < len(lines):
    line = lines[i].strip()

    if line.startswith('#'):
        current_chapter = line[line.find(' ')+1:]
        total_chapters += 1
    else:
        completed_chapters.add(current_chapter)

        if line.startswith('@start'):
            start = parse_timestamp(line.removeprefix('@start '))

            if start < oldest_timestamp:
                oldest_timestamp = start

            i += 1
            line = lines[i].strip()

            end = parse_timestamp(line.removeprefix('@stop '))
            delta = end - start
            hours = delta.seconds / 60 / 60

            total_hours += hours
            if start > this_week:
                week_hours += hours
            if start > today:
                day_hours += hours

    i += 1

completed_chapters = len(completed_chapters)

num_days = (datetime.now() - oldest_timestamp).days or 1
hours_per_day = total_hours / num_days

hours_per_chapter = total_hours / completed_chapters
hours_to_completion = hours_per_chapter * (total_chapters - completed_chapters)
days_to_completion = math.ceil(hours_to_completion / hours_per_day)

completion_date = datetime.now() + timedelta(days=days_to_completion)

completion_percentage = completed_chapters/total_chapters*100

print(f'Started on: {oldest_timestamp.strftime("%Y-%m-%d")}')
print(f'Progress: [{completed_chapters}/{total_chapters}] ({round(completion_percentage, 2)}%)')
print(f'Total: {round(total_hours, 2)}h')
print(f'Week: {round(week_hours, 2)}h')
print(f'Day: {round(day_hours, 2)}h')
print(f'Hours per day: {round(hours_per_day, 2)}h')
print(f'Hours to completion: {round(hours_to_completion, 2)}h')
print(f'Completion date: {completion_date.strftime("%Y-%m-%d")}')
```
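progress.py reads a plain-text tracking file whose layout is only implied by the loop above: `#`-prefixed chapter headings, with `@start`/`@stop` timestamp pairs logged under the chapters that have been worked on. A minimal sketch of that layout, with invented chapter names and timestamps, reusing the same time-delta arithmetic:

```python
from datetime import datetime

# Assumed tracking-file layout (inferred from progress.py above).
sample = """\
# 1 Introduction
@start 2021-07-01 18:00:00
@stop 2021-07-01 19:30:00
# 2 Background
@start 2021-07-03 09:15:00
@stop 2021-07-03 10:00:00
# 3 Methods
"""

fmt = '%Y-%m-%d %H:%M:%S'
lines = [l.strip() for l in sample.splitlines()]

# Sum the hours between each @start/@stop pair, like progress.py does.
hours = 0.0
for start_line, stop_line in zip(lines, lines[1:]):
    if start_line.startswith('@start') and stop_line.startswith('@stop'):
        start = datetime.strptime(start_line.removeprefix('@start '), fmt)
        stop = datetime.strptime(stop_line.removeprefix('@stop '), fmt)
        hours += (stop - start).seconds / 3600

print(f'{hours:.2f}h logged')   # 2.25h for the sample above
```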
**search.py** — 25 lines removed (`@@ -1,25 +0,0 @@`)

```python
from subprocess import run
import sys
import json

from common import format_timestamp

journal = json.load(open('journal.json'))

matches = []
keyword = sys.argv[1].lower()

for day, obj in journal.items():
    for entry in obj['entries']:
        for block in entry['blocks']:
            if isinstance(block, str):
                if keyword in block.lower().split():
                    matches.append((format_timestamp(entry['timestamp']), block))

buf = ''

for (ts, c) in matches:
    c = c.replace('\n', ' ').strip()
    buf += (f'[[{ts}]] {c}')[:80] + '\n'

run(['nvim', '-'], input=buf.encode('utf-8'))
```
**summary.py** — 63 lines removed (`@@ -1,63 +0,0 @@`)

```python
import json
import sys

from common import parse_foods_file, format_timestamp
foods, recipes = parse_foods_file()

do_yesterday = len(sys.argv) > 1
day_index = -2 if do_yesterday else -1

journal = json.load(open('journal.json'))

current_day = list(journal)[day_index]

daily_grams = 0.0
daily_calories = 0.0
daily_protein = 0.0

for entry in journal[current_day]['entries']:
    has_printed = False
    entry_calories = 0.0
    entry_protein = 0.0
    for diet in (b for b in entry['blocks'] if type(b) != str and b['type'] == 'diet'):
        if not has_printed:
            print(f'-- {format_timestamp(entry["timestamp"])}')
            has_printed = True

        value = diet['amount']
        name = diet['food']

        if name in recipes:
            food = recipes[name]

            if value == 0.0:
                value = food['TOTAL']

            food = {k: v*(value/food['TOTAL']) for k,v in food.items()}
        elif name in foods:
            if value == 0.0:
                value = 100

            food = {k: v*(value/100.0) for k,v in foods[name].items()}
        else:
            print(f'ERROR: Invalid diet entry: {diet}')
            continue

        protein = round(food.get('Protein', 0.0), 2)
        calories = round(food.get('Energy', 0.0), 2)

        entry_calories += calories
        entry_protein += protein

        print(f'{name:<20} {value:<6}g, {calories:<6}kcal, {protein:<6}g protein')

    if has_printed:
        entry_calories = round(entry_calories, 2)
        entry_protein = round(entry_protein, 2)
        print(f'-- TOTAL: {entry_calories}kcal, {entry_protein}g protein')
        print()

        daily_calories += entry_calories
        daily_protein += entry_protein

print(f'-- DAILY TOTAL ({daily_calories}kcal, {daily_protein}g protein)')
```