Changeset - 5d45171ed7c4
Ben Sturmfels (bsturmfels) - 2022-02-09 23:15:01
ben@sturm.com.au
reconcile: Add statement total and cleared total.
1 file changed with 17 insertions and 0 deletions:
conservancy_beancount/reconcile/prototype_amex_reconciler.py
...
 
@@ -18,24 +18,25 @@ Problems in scope:
 - adding statement/reconciliation metadata to books is manual and prone to mistakes
 - Beancount doesn't provide any infrastructure for programmatically updating the books, only appending
 - after updates to the books files, Beancount must be restarted to reflect the changes
 - updates also invalidate the cache, meaning a restart takes several minutes
 - paper checks are entered in the books when written, but may not be cashed until months later (causing reconciliation errors)
 - balance checks are manually updated in svn/Financial/Ledger/sanity-check-balances.yaml
 - jumping to an individual transaction in a large ledger isn't trivial - Emacs grep mode is the current best option
 - Pam and other staff don't use Emacs
 - auditors would prefer Bradley didn't perform reconciliation, ideally not Rosanne either
 - transactions are entered manually and reconciled after the fact, but importing from statements may be useful in some cases

Q. How are reconciliation reports created currently? How do you read them?
 - by hand, by copying and pasting from the helper tool output

TODO/ISSUES:
 - AMEX statement doesn't provide bank balance or running total

"""
import argparse
import csv
import datetime
import decimal
import os
from typing import Dict, List, Tuple

...
 
@@ -101,49 +102,62 @@ def records_match(r1: Dict, r2: Dict) -> Tuple[bool, str]:
        return True, f'Matched +/- 3 days, ignoring payee'
    else:
        return False, ''


parser = argparse.ArgumentParser(description='Reconciliation helper')
parser.add_argument('--beancount-file', required=True)
parser.add_argument('--csv-statement', required=True)
parser.add_argument('--account', required=True, help='eg. Liabilities:CreditCard:AMEX')
parser.add_argument('--grep-output-filename')
# parser.add_argument('--report-group-regex')
parser.add_argument('--show-reconciled-matches', action='store_true')
parser.add_argument('--statement-total', required=True)
args = parser.parse_args()

statement_total = decimal.Decimal(args.statement_total)
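With the new option, an invocation looks something like this (the file paths and the total are illustrative, not taken from the changeset):

    python3 conservancy_beancount/reconcile/prototype_amex_reconciler.py \
        --beancount-file=books/2022.beancount \
        --csv-statement=amex-jan-2022.csv \
        --account=Liabilities:CreditCard:AMEX \
        --statement-total=1234.56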
 
# TODO: Should put in a sanity check to make sure the statement you're feeding
# in matches the account you've provided.
if 'AMEX' in args.account:
    standardize_statement_record = standardize_amex_record
else:
    standardize_statement_record = standardize_fr_record
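One possible shape for that sanity check, sketched here as a guess rather than a design decision (the 'Reference' column is an assumed AMEX-only field, not verified against real statement exports):

    import csv

    def check_statement_matches_account(csv_path: str, account: str) -> None:
        # Hypothetical sketch: peek at the CSV header and refuse to continue
        # if it doesn't look like the statement type implied by --account.
        with open(csv_path) as f:
            header = csv.DictReader(f).fieldnames or []
        looks_like_amex = 'Reference' in header
        if ('AMEX' in account) != looks_like_amex:
            raise SystemExit(f'{csv_path} does not look like a statement for {account}')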
 
with open(args.csv_statement) as f:
    reader = csv.DictReader(f)
    statement_trans = sort_records([standardize_statement_record(row, reader.line_num) for row in reader])

begin_date = statement_trans[0]['date']
end_date = statement_trans[-1]['date']
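The standardize_* helpers aren't shown in this hunk, but from the way records are used below, each one is a plain dict carrying at least a 'date' and the source 'line' (books records also carry 'filename'). A purely illustrative statement record, with made-up values:

    example_statement_record = {'date': datetime.date(2022, 1, 15), 'line': 42}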
 
# Do we traverse the in-memory entries list and filter it, or do we use
# Beancount Query Language (BQL) to get a list of transactions? Currently
# using BQL.
#
# beancount.query.query_compile.compile() and
# beancount.query.query_execute.filter_entries() look useful in this respect,
# but I'm not clear on how to use compile(). An example would help.
entries, _, options = loader.load_file(args.beancount_file)
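For the first option, here is a rough sketch of the in-memory traversal, assuming Beancount v2's beancount.core.data types (not the approach the prototype actually takes):

    from beancount.core import data

    def postings_for_account(entries, account, begin_date, end_date):
        """Yield (transaction, posting) pairs for one account within a date range."""
        for entry in entries:
            if not isinstance(entry, data.Transaction):
                continue
            if not begin_date <= entry.date <= end_date:
                continue
            for posting in entry.postings:
                if posting.account == account:
                    yield entry, posting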
 
total_query = f"""SELECT sum(COST(position)) AS aa WHERE account = "{args.account}"
    AND date <= {end_date.isoformat()} AND META('bank-statement') != NULL"""
result_types, result_rows = run_query(
    entries,
    options,
    total_query,
    numberify=True,
)
cleared_total = result_rows[0][0]
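For context, the cleared total only counts postings that already carry bank-statement metadata, i.e. books entries roughly of this shape (the payee, amounts, expense account and statement filename are made up):

    2022-01-15 * "Example vendor" "Example payment"
      Liabilities:CreditCard:AMEX   -100.00 USD
        bank-statement: "amex-jan-2022.csv"
      Expenses:Services:Hosting      100.00 USD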
 
# String concatenation looks bad, but there's no SQL injection possible here
# because BQL can't write back to the Beancount files. I hope!
query = f"SELECT filename, META('lineno') AS posting_line, META('bank-statement') AS posting_statement, date, number(cost(position)), payee, narration where account = '{args.account}' and date >= {begin_date} and date <= {end_date}"
result_types, result_rows = run_query(
    entries,
    options,
    query,
)

books_trans = sort_records([standardize_beancount_record(row) for row in result_rows])

num_statement_records = len(statement_trans)
...
 
@@ -174,24 +188,27 @@ for r1 in statement_trans:
            break
    else:
        matches.append([r1['date'], f'{format_record(r1)}  →  {" ":^45}  ✗ Not in books ({os.path.basename(args.csv_statement)}:{r1["line"]})'])
for r2 in books_trans:
    matches.append([r2['date'], f'{" ":^45}  →  {format_record(r2)}  ✗ Not on statement ({os.path.basename(r2["filename"])}:{r2["line"]})'])

print('-' * 155)
print(f'{"Statement transaction":<38}            {"Books transaction":<44}   Notes')
print('-' * 155)
for _, output in sorted(matches):
    print(output)
print('-' * 155)
print(f'STATEMENT TOTAL: {statement_total}')
print(f'CLEARED TOTAL:   {cleared_total:12,.2f}')
print('-' * 155)
 
# Write statement metadata back to books
if metadata_to_apply:
    print('Mark matched transactions as reconciled in the books? (y/N) ', end='')
    if input().lower() == 'y':
        files = {}
        # Query results aren't necessarily sequential in a file, so we need to
        # sort so that our line-number offsets stay correct.
        for filename, line, metadata in sorted(metadata_to_apply):
            if filename not in files:
                with open(filename, 'r') as f:
                    # print(f'Opening {filename}.')
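The diff is truncated here, but the sorting comment is worth a small illustration: each inserted metadata line shifts every later line in the same file down by one, so edits have to be applied in ascending line order with a running offset. A self-contained sketch of that idea (an assumption about the approach, not the code that follows in the real file):

    from typing import List, Tuple

    def apply_insertions(lines: List[str], insertions: List[Tuple[int, str]]) -> List[str]:
        # Insert each new line directly after the given 1-based line number,
        # lowest line numbers first, tracking how many lines we have added.
        result = list(lines)
        offset = 0
        for lineno, new_line in sorted(insertions):
            result.insert(lineno + offset, new_line)
            offset += 1
        return result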