Changeset - 087b3274e7d2
[Not reviewed]
Brett Smith - 2021-01-19 19:23:51
brettcsmith@brettcsmith.org
ledger: Dedicated reporting tab for Expenses:Payroll.

To better accommodate the new payroll-type metadata.
4 files changed with 13 insertions and 2 deletions:
conservancy_beancount/reports/ledger.py
"""ledger.py - General ledger report from Beancount

This tool produces a spreadsheet that shows postings in Beancount, organized
by account.

Specify the date range you want to report with the ``--begin`` and ``--end``
options.

Select the accounts you want to report with the ``--account`` option. You can
specify this option multiple times. The report will include at least one sheet
for each account you specify. Subaccounts will be reported on that sheet as
well.

Select the postings you want to report by passing metadata search terms in
``name=value`` format.

Run ``ledger-report --help`` for abbreviations and other options.

Examples
--------

Report all activity related to a given project::

    ledger-report project=NAME

Get all Assets postings for a given month to help with reconciliation::

    ledger-report -a Assets -b 2018-05-01 -e 2018-06-01
"""
# Copyright © 2020  Brett Smith
# License: AGPLv3-or-later WITH Beancount-Plugin-Additional-Permission-1.0
#
# Full copyright and licensing details can be found at toplevel file
# LICENSE.txt in the repository.

import argparse
import collections
import datetime
import enum
import itertools
import operator
import logging
import sys

from typing import (
    Any,
    Callable,
    Dict,
    Iterable,
    Iterator,
    List,
    Mapping,
    Optional,
    Sequence,
    Set,
    TextIO,
    Tuple,
    Union,
)
from ..beancount_types import (
    Transaction,
)

from pathlib import Path

import odf.table  # type:ignore[import]

from beancount.core import data as bc_data
from beancount.parser import printer as bc_printer

from . import core
from . import rewrite
from .. import books
from .. import cliutil
from .. import config as configmod
from .. import data
from .. import ranges
from .. import rtutil

PostTally = List[Tuple[int, data.Account]]

PROGNAME = 'ledger-report'
logger = logging.getLogger('conservancy_beancount.reports.ledger')

class LedgerODS(core.BaseODS[data.Posting, None]):
    CORE_COLUMNS: Sequence[str] = [
        'Date',
        data.Metadata.human_name('entity'),
        'Description',
        'Original Amount',
        'Booked Amount',
    ]
    ACCOUNT_COLUMNS: Dict[str, Sequence[str]] = collections.OrderedDict([
        ('Income', ['project', 'rt-id', 'receipt', 'income-type', 'memo']),
        ('Expenses:Payroll', ['project', 'rt-id', 'payroll-type', 'expense-type', 'receipt', 'approval']),
        ('Expenses', ['project', 'rt-id', 'receipt', 'approval', 'expense-type']),
        ('Equity', ['project', 'rt-id']),
        ('Assets:Receivable', ['project', 'rt-id', 'invoice', 'approval', 'contract', 'purchase-order']),
        ('Liabilities:Payable', ['project', 'rt-id', 'invoice', 'approval', 'contract', 'purchase-order']),
        ('Assets:PayPal', ['rt-id', 'paypal-id', 'receipt', 'approval']),
        ('Assets', ['rt-id', 'receipt', 'approval', 'bank-statement']),
        ('Liabilities', ['rt-id', 'receipt', 'approval', 'bank-statement']),
    ])
    CLASSIFICATION_COLUMN = "Account Classification"
    # Excel 2003 was limited to 65,536 rows per worksheet.
    # While we can probably count on all our users supporting more modern
    # formats (Excel 2007 supports over 1 million rows per worksheet),
    # keeping the default limit conservative seems good to avoid running into
    # other limits (like the number of hyperlinks per worksheet), plus just
    # better for human organization and readability.
    SHEET_SIZE = 65500

    def __init__(self,
                 start_date: datetime.date,
                 stop_date: datetime.date,
                 accounts: Optional[Sequence[str]]=None,
                 rt_wrapper: Optional[rtutil.RT]=None,
                 sheet_size: Optional[int]=None,
                 totals_with_entries: Optional[Sequence[str]]=None,
                 totals_without_entries: Optional[Sequence[str]]=None,
    ) -> None:
        if sheet_size is None:
            sheet_size = self.SHEET_SIZE
        if totals_with_entries is None:
            totals_with_entries = [s for s in self.ACCOUNT_COLUMNS if ':' not in s]
        if totals_without_entries is None:
            totals_without_entries = totals_with_entries
        super().__init__(rt_wrapper)
        self.date_range = ranges.DateRange(start_date, stop_date)
        self.sheet_size = sheet_size
        self.totals_with_entries = totals_with_entries
        self.totals_without_entries = totals_without_entries
        self.report_name = "Ledger"

        if accounts is None:
            self.accounts = set(data.Account.iter_accounts())
            self.required_sheet_names = list(self.ACCOUNT_COLUMNS)
        else:
            self.accounts = set()
            self.required_sheet_names = []
            for acct_spec in accounts:
                subaccounts = frozenset(data.Account.iter_accounts_by_hierarchy(acct_spec))
                if subaccounts:
                    self.accounts.update(subaccounts)
                    self._require_sheet(acct_spec)
                else:
                    account_roots_map = collections.defaultdict(list)
                    for account in data.Account.iter_accounts_by_classification(acct_spec):
                        self.accounts.add(account)
                        account_roots_map[account.root_part()].append(account)
                    if not account_roots_map:
                        raise ValueError("unknown account name or classification", acct_spec)
                    for root_part, accounts in account_roots_map.items():
                        start_count = min(account.count_parts() for account in accounts)
                        for count in range(start_count, 1, -1):
                            target = accounts[0].root_part(count)
                            if all(acct.root_part(count) == target for acct in accounts):
                                self._require_sheet(target)
                                break
                        else:
                            self._require_sheet(root_part)

    def _require_sheet(self, new_sheet: str) -> None:
        for index, sheet in enumerate(self.required_sheet_names):
            if new_sheet == sheet:
                break
            elif new_sheet.startswith(sheet):
                self.required_sheet_names.insert(index, new_sheet)
                break
        else:
            self.required_sheet_names.append(new_sheet)

    def init_styles(self) -> None:
        super().init_styles()
        self.amount_column = self.column_style(1.2)
        self.default_column = self.column_style(1.5)
        self.column_styles: Mapping[str, Union[str, odf.style.Style]] = {
            'Account': self.column_style(2),  # for TransactionODS
            'Date': '',
            'Description': self.column_style(2),
            'Original Amount': self.amount_column,
            'Booked Amount': self.amount_column,
            data.Metadata.human_name('project'): self.amount_column,
            data.Metadata.human_name('rt-id'): self.amount_column,
            self.CLASSIFICATION_COLUMN: self.column_style(3),
        }

    @classmethod
    def _group_tally(
            cls,
            tally_by_account: PostTally,
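
The ledger.py change above adds a dedicated 'Expenses:Payroll' entry to ACCOUNT_COLUMNS, placed ahead of the broader 'Expenses' key so payroll accounts get their own sheet with a payroll-type column. The code that maps an account to its column set is not part of this excerpt; the sketch below only illustrates the assumed first-matching-prefix lookup and why the entry's position matters (columns_for is a hypothetical helper, not part of the module)::

    import collections

    ACCOUNT_COLUMNS = collections.OrderedDict([
        ('Expenses:Payroll', ['project', 'rt-id', 'payroll-type', 'expense-type', 'receipt', 'approval']),
        ('Expenses', ['project', 'rt-id', 'receipt', 'approval', 'expense-type']),
    ])

    def columns_for(account):
        # Check prefixes in declaration order: 'Expenses:Payroll' must come
        # before the shorter 'Expenses' key, or payroll accounts would fall
        # through to the generic Expenses columns and lose 'payroll-type'.
        for prefix, columns in ACCOUNT_COLUMNS.items():
            if account == prefix or account.startswith(prefix + ':'):
                return columns
        return []

    assert 'payroll-type' in columns_for('Expenses:Payroll')
    assert 'payroll-type' not in columns_for('Expenses:Other')
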
setup.py
#!/usr/bin/env python3

from setuptools import setup

setup(
    name='conservancy_beancount',
    description="Plugin, library, and reports for reading Conservancy's books",
-    version='1.15.4',
+    version='1.15.5',
    author='Software Freedom Conservancy',
    author_email='info@sfconservancy.org',
    license='GNU AGPLv3+',

    install_requires=[
        'babel>=2.6',  # Debian:python3-babel
        'beancount>=2.2',  # Debian:beancount
        'GitPython>=2.0',  # Debian:python3-git
        # 1.4.1 crashes when trying to save some documents.
        'odfpy>=1.4.0,!=1.4.1',  # Debian:python3-odf
        'pdfminer.six>=20200101',
        'PyYAML>=3.0',  # Debian:python3-yaml
        'regex',  # Debian:python3-regex
        'rt>=2.0',
    ],
    setup_requires=[
        'pytest-mypy',
        'pytest-runner',  # Debian:python3-pytest-runner
    ],
    tests_require=[
        'mypy>=0.770',  # Debian:python3-mypy
        'pytest',  # Debian:python3-pytest
    ],

    packages=[
        'conservancy_beancount',
        'conservancy_beancount.pdfforms',
        'conservancy_beancount.pdfforms.extract',
        'conservancy_beancount.plugin',
        'conservancy_beancount.reports',
        'conservancy_beancount.tools',
    ],
    entry_points={
        'console_scripts': [
            'accrual-report = conservancy_beancount.reports.accrual:entry_point',
            'assemble-audit-reports = conservancy_beancount.tools.audit_report:entry_point',
            'balance-sheet-report = conservancy_beancount.reports.balance_sheet:entry_point',
            'budget-report = conservancy_beancount.reports.budget:entry_point',
            'bean-sort = conservancy_beancount.tools.sort_entries:entry_point',
            'extract-odf-links = conservancy_beancount.tools.extract_odf_links:entry_point',
            'fund-report = conservancy_beancount.reports.fund:entry_point',
            'ledger-report = conservancy_beancount.reports.ledger:entry_point',
            'opening-balances = conservancy_beancount.tools.opening_balances:entry_point',
            'pdfform-extract = conservancy_beancount.pdfforms.extract:entry_point',
            'pdfform-extract-irs990scheduleA = conservancy_beancount.pdfforms.extract.irs990scheduleA:entry_point',
            'pdfform-fill = conservancy_beancount.pdfforms.fill:entry_point',
            'split-ods-links = conservancy_beancount.tools.split_ods_links:entry_point',
        ],
    },
)
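
The version bump to 1.15.5 ships the new sheet in the next release, and the console_scripts entry above is what installs the ledger-report command used in the module docstring and tests. A hypothetical post-install check, not part of the repository, assuming Python 3.10 or later for the keyword form of entry_points::

    from importlib.metadata import entry_points

    # Look up the installed console script and confirm it points at the
    # ledger module's entry_point callable.
    (script,) = entry_points(group='console_scripts', name='ledger-report')
    assert script.value == 'conservancy_beancount.reports.ledger:entry_point'
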
tests/books/ledger.beancount
2018-01-01 open Equity:OpeningBalance
2018-01-01 open Assets:Checking
  classification: "Cash"
2018-01-01 open Assets:PayPal
  classification: "Cash"
2018-01-01 open Assets:Prepaid
  classification: "Prepaid expenses"
2018-01-01 open Assets:Receivable:Accounts
  classification: "Accounts receivable"
2018-01-01 open Expenses:Other
  classification: "Other expenses"
2018-01-01 open Expenses:Payroll
  classification: "Payroll expenses"
2018-01-01 open Income:Other
  classification: "Other income"
2018-01-01 open Liabilities:CreditCard
  classification: "Accounts payable"
2018-01-01 open Liabilities:Payable:Accounts
  classification: "Accounts payable"
2018-01-01 open Liabilities:UnearnedIncome
  classification: "Unearned income"

2018-02-28 * "Opening balance"
  Equity:OpeningBalance  -10,000 USD
  Assets:Checking         10,000 USD

2018-06-06 * "Accrued expense"
  project: "eighteen"
  Liabilities:Payable:Accounts  -60 USD
  Expenses:Other                 60 USD

2018-09-09 * "Paid expense"
  Liabilities:Payable:Accounts  60 USD
  project: "eighteen"
  Assets:Checking              -60 USD

2018-12-12 * "Accrued income"
  project: "eighteen"
  Assets:Receivable:Accounts  120 USD
  Income:Other               -120 USD

2019-03-03 * "Paid income"
  Assets:Receivable:Accounts  -120 USD
  project: "eighteen"
  Assets:Checking              120 USD

2019-06-06 * "Credit card expense"
  Liabilities:CreditCard  -65 USD
  Expenses:Other           65 USD
  project: "nineteen"

2019-09-09 * "Credit card paid"
  Liabilities:CreditCard  65 USD
  Assets:Checking        -65 USD
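
The fixture now opens Expenses:Payroll with its own classification but never posts to it, which is what lets the tests verify that the dedicated sheet still appears when empty. A small sketch of reading those Open directives back with the same Beancount loader the tests use (the repository-relative path is an assumption)::

    from beancount import loader as bc_loader
    from beancount.core import data as bc_data

    entries, errors, options = bc_loader.load_file('tests/books/ledger.beancount')
    # Collect each opened account and its classification metadata.
    opened = {
        entry.account: entry.meta.get('classification')
        for entry in entries
        if isinstance(entry, bc_data.Open)
    }
    assert opened['Expenses:Payroll'] == 'Payroll expenses'
    # No transaction in the fixture posts to Expenses:Payroll, so the account
    # only shows up in the report as an empty dedicated sheet.
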
tests/test_reports_ledger.py
"""test_reports_ledger.py - Unit tests for general ledger report"""
# Copyright © 2020  Brett Smith
# License: AGPLv3-or-later WITH Beancount-Plugin-Additional-Permission-1.0
#
# Full copyright and licensing details can be found at toplevel file
# LICENSE.txt in the repository.

import collections
import contextlib
import copy
import datetime
import io
import itertools
import re

import pytest

from . import testutil

import odf.table
import odf.text

from beancount.core import data as bc_data
from beancount import loader as bc_loader
from conservancy_beancount import data
from conservancy_beancount.reports import core
from conservancy_beancount.reports import ledger

clean_account_meta = contextlib.contextmanager(testutil.clean_account_meta)

Acct = data.Account

_ledger_load = bc_loader.load_file(testutil.test_path('books/ledger.beancount'))
DEFAULT_REPORT_SHEETS = [
    'Balance',
    'Income',
    'Expenses:Payroll',
    'Expenses',
    'Equity',
    'Assets:Receivable',
    'Liabilities:Payable',
    'Assets:PayPal',
    'Assets',
    'Liabilities',
]
-PROJECT_REPORT_SHEETS = DEFAULT_REPORT_SHEETS[:5] + [
+PROJECT_REPORT_SHEETS = [
    *DEFAULT_REPORT_SHEETS[:2],
    *DEFAULT_REPORT_SHEETS[3:6],
    'Assets:Prepaid',
    'Liabilities:UnearnedIncome',
    'Liabilities:Payable',
]
OVERSIZE_RE = re.compile(
    r'^([A-Za-z0-9:]+) has ([0-9,]+) rows, over size ([0-9,]+)$'
)
START_DATE = datetime.date(2018, 3, 1)
MID_DATE = datetime.date(2019, 3, 1)
STOP_DATE = datetime.date(2020, 3, 1)

REPORT_KWARGS = [
    {'report_class': ledger.LedgerODS},
    *({'report_class': ledger.TransactionODS, 'txn_filter': flags}
      for flags in ledger.TransactionFilter),
]

@pytest.fixture
def ledger_entries():
    return copy.deepcopy(_ledger_load[0])

def iter_accounts(entries):
    for entry in entries:
        if isinstance(entry, bc_data.Open):
            yield entry.account

class NotFound(Exception): pass
class NoSheet(NotFound): pass
class NoHeader(NotFound): pass

class ExpectedPostings(core.RelatedPostings):
    @classmethod
    def find_section(cls, ods, account):
        for sheet in ods.getElementsByType(odf.table.Table):
            sheet_account = sheet.getAttribute('name').replace(' ', ':')
            if sheet_account and account.is_under(sheet_account):
                break
        else:
            raise NoSheet(account)
        rows = iter(sheet.getElementsByType(odf.table.TableRow))
        for row in rows:
            cells = row.childNodes
            if (len(cells) >= 3
                and cells[1].text == account
                and not cells[0].text):
                break
        else:
            raise NoHeader(account)
        return rows

    @classmethod
    def check_not_in_report(cls, ods, *accounts):
        for account in accounts:
            with pytest.raises(NotFound):
                cls.find_section(ods, data.Account(account))

    @classmethod
    def check_in_report(cls, ods, account,
                        start_date=START_DATE, end_date=STOP_DATE, txn_filter=None):
        date = end_date + datetime.timedelta(days=1)
        txn = testutil.Transaction(date=date, postings=[
            (account, 0),
        ])
        related = cls(data.Posting.from_txn(txn))
        if txn_filter is None:
            related.check_report(ods, start_date, end_date)
        else:
            related.check_txn_report(ods, txn_filter, start_date, end_date)

    def slice_date_range(self, start_date, end_date):
        postings = enumerate(self)
        for start_index, post in postings:
            if start_date <= post.meta.date:
                break
        else:
            start_index += 1
        if end_date <= post.meta.date:
            end_index = start_index
        else:
            for end_index, post in postings:
                if end_date <= post.meta.date:
                    break
            else:
                end_index = None
        return (self[:start_index].balance_at_cost(),
                self[start_index:end_index])

    def check_report(self, ods, start_date, end_date, expect_totals=True):
        account = self[0].account
        norm_func = core.normalize_amount_func(account)
        open_bal, expect_posts = self.slice_date_range(start_date, end_date)
        open_bal = norm_func(open_bal)
        closing_bal = norm_func(expect_posts.balance_at_cost())
        rows = self.find_section(ods, account)
        if expect_totals and account.is_under('Assets', 'Liabilities'):
            opening_row = testutil.ODSCell.from_row(next(rows))

@@ -309,192 +312,197 @@ def test_plan_sheets_all_oversize(caplog):
    # In this case, each account should appear in alphabetical order.
    assert actual == ['Assets:Cash', 'Assets:Checking']
    check_oversize_logs(caplog, have, 100)

def test_plan_sheets_full_split_required(caplog):
    have = {
        Acct('Assets:Bank:Savings'): 98,
        Acct('Assets:Bank:Checking'): 96,
        Acct('Assets:Bank:Investment'): 94,
    }
    actual = ledger.LedgerODS.plan_sheets(have, ['Assets'], 100)
    assert actual == ['Assets:Bank:Checking', 'Assets:Bank:Savings', 'Assets']
    assert not caplog.records

def build_report(ledger_entries, start_date, stop_date, *args,
                 report_class=ledger.LedgerODS, **kwargs):
    postings = list(data.Posting.from_entries(iter(ledger_entries)))
    with clean_account_meta():
        data.Account.load_openings_and_closings(iter(ledger_entries))
        report = report_class(start_date, stop_date, *args, **kwargs)
        report.write(iter(postings))
    return postings, report

@pytest.mark.parametrize('report_kwargs', iter(REPORT_KWARGS))
@pytest.mark.parametrize('start_date,stop_date', [
    (START_DATE, STOP_DATE),
    (START_DATE, MID_DATE),
    (MID_DATE, STOP_DATE),
    (START_DATE.replace(month=6), START_DATE.replace(month=12)),
    (STOP_DATE, STOP_DATE.replace(month=12)),
])
def test_date_range_report(ledger_entries, start_date, stop_date, report_kwargs):
    txn_filter = report_kwargs.get('txn_filter')
    postings, report = build_report(ledger_entries, start_date, stop_date, **report_kwargs)
    expected = dict(ExpectedPostings.group_by_account(postings))
    for account in iter_accounts(ledger_entries):
        try:
            related = expected[account]
        except KeyError:
            ExpectedPostings.check_in_report(
                report.document, account, start_date, stop_date, txn_filter,
            )
        else:
            if txn_filter is None:
                related.check_report(report.document, start_date, stop_date)
            else:
                related.check_txn_report(
                    report.document, txn_filter, start_date, stop_date,
                )

@pytest.mark.parametrize('report_kwargs', iter(REPORT_KWARGS))
@pytest.mark.parametrize('tot_accts', [
    (),
    ('Assets', 'Liabilities'),
    ('Income', 'Expenses'),
    ('Assets', 'Liabilities', 'Income', 'Expenses'),
])
def test_report_filter_totals(ledger_entries, tot_accts, report_kwargs):
    txn_filter = report_kwargs.get('txn_filter')
    postings, report = build_report(ledger_entries, START_DATE, STOP_DATE,
                                    totals_with_entries=tot_accts,
                                    totals_without_entries=tot_accts,
                                    **report_kwargs)
    expected = dict(ExpectedPostings.group_by_account(postings))
    for account in iter_accounts(ledger_entries):
        expect_totals = account.startswith(tot_accts)
        if account in expected and expected[account][-1].meta.date >= START_DATE:
            if txn_filter is None:
                expected[account].check_report(
                    report.document, START_DATE, STOP_DATE, expect_totals=expect_totals,
                )
            else:
                expected[account].check_txn_report(
                    report.document, txn_filter,
                    START_DATE, STOP_DATE, expect_totals=expect_totals,
                )
        elif expect_totals:
            ExpectedPostings.check_in_report(
                report.document, account, START_DATE, STOP_DATE, txn_filter,
            )
        else:
            ExpectedPostings.check_not_in_report(report.document, account)

@pytest.mark.parametrize('report_kwargs', iter(REPORT_KWARGS))
@pytest.mark.parametrize('accounts', [
    ('Income', 'Expenses'),
    ('Assets:Receivable', 'Liabilities:Payable'),
])
def test_account_names_report(ledger_entries, accounts, report_kwargs):
    txn_filter = report_kwargs.get('txn_filter')
    postings, report = build_report(ledger_entries, START_DATE, STOP_DATE,
                                    accounts, **report_kwargs)
    expected = dict(ExpectedPostings.group_by_account(postings))
    for account in iter_accounts(ledger_entries):
        if not account.startswith(accounts):
            ExpectedPostings.check_not_in_report(report.document, account)
        # This account is reportable but has no postings
        elif account == 'Expenses:Payroll':
            ExpectedPostings.check_in_report(
                report.document, account, START_DATE, STOP_DATE, txn_filter,
            )
        elif txn_filter is None:
            expected[account].check_report(report.document, START_DATE, STOP_DATE)
        else:
            expected[account].check_txn_report(
                report.document, txn_filter, START_DATE, STOP_DATE,
            )

def run_main(arglist, config=None):
    if config is None:
        config = testutil.TestConfig(
            books_path=testutil.test_path('books/ledger.beancount'),
            rt_client=testutil.RTClient(),
        )
    arglist.insert(0, '--output-file=-')
    output = io.BytesIO()
    errors = io.StringIO()
    with clean_account_meta():
        retcode = ledger.main(arglist, output, errors, config)
    output.seek(0)
    return retcode, output, errors

def test_main(ledger_entries):
    retcode, output, errors = run_main([
        '-b', START_DATE.isoformat(),
        '-e', STOP_DATE.isoformat(),
    ])
    output.seek(0)
    assert not errors.getvalue()
    assert retcode == 0
    ods = odf.opendocument.load(output)
    assert get_sheet_names(ods) == DEFAULT_REPORT_SHEETS[:]
    postings = data.Posting.from_entries(iter(ledger_entries))
    expected = dict(ExpectedPostings.group_by_account(postings))
    for account in iter_accounts(ledger_entries):
        try:
            expected[account].check_report(ods, START_DATE, STOP_DATE)
        except KeyError:
            ExpectedPostings.check_in_report(ods, account)

@pytest.mark.parametrize('acct_arg', [
    'Liabilities',
    'Accounts payable',
])
def test_main_account_limit(ledger_entries, acct_arg):
    retcode, output, errors = run_main([
        '-a', acct_arg,
        '-b', START_DATE.isoformat(),
        '-e', STOP_DATE.isoformat(),
    ])
    assert not errors.getvalue()
    assert retcode == 0
    ods = odf.opendocument.load(output)
    assert get_sheet_names(ods) == ['Balance', 'Liabilities']
    postings = data.Posting.from_entries(ledger_entries)
    for account, expected in ExpectedPostings.group_by_account(postings):
        if account == 'Liabilities:UnearnedIncome':
            should_find = acct_arg == 'Liabilities'
        else:
            should_find = account.startswith('Liabilities')
        try:
            expected.check_report(ods, START_DATE, STOP_DATE)
        except NotFound:
            assert not should_find
        else:
            assert should_find

def test_main_account_classification_splits_hierarchy(ledger_entries):
    retcode, output, errors = run_main([
        '-a', 'Cash',
        '-b', START_DATE.isoformat(),
        '-e', STOP_DATE.isoformat(),
    ])
    assert not errors.getvalue()
    assert retcode == 0
    ods = odf.opendocument.load(output)
    assert get_sheet_names(ods) == ['Balance', 'Assets']
    postings = data.Posting.from_entries(ledger_entries)
    for account, expected in ExpectedPostings.group_by_account(postings):
        should_find = (account == 'Assets:Checking' or account == 'Assets:PayPal')
        try:
            expected.check_report(ods, START_DATE, STOP_DATE)
        except NotFound:
            assert not should_find, f"{account} not found in report"
        else:
            assert should_find, f"{account} in report but should be excluded"

@pytest.mark.parametrize('project,start_date,stop_date', [
    ('eighteen', START_DATE, MID_DATE.replace(day=30)),
    ('nineteen', MID_DATE, STOP_DATE),
])
def test_main_project_report(ledger_entries, project, start_date, stop_date):
    postings = data.Posting.from_entries(iter(ledger_entries))
    for key, related in ExpectedPostings.group_by_meta(postings, 'project'):
        if key == project:
            break
    assert key == project
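
With 'Expenses:Payroll' added to DEFAULT_REPORT_SHEETS, test_main already covers the new tab in the full report. A possible follow-on in the style of test_main_account_limit, not part of this changeset, could pin down the single-account case; the expected two-sheet list is an assumption drawn from how the Liabilities case behaves::

    def test_main_payroll_sheet_only():
        retcode, output, errors = run_main([
            '-a', 'Expenses:Payroll',
            '-b', START_DATE.isoformat(),
            '-e', STOP_DATE.isoformat(),
        ])
        assert not errors.getvalue()
        assert retcode == 0
        ods = odf.opendocument.load(output)
        # Assumed by analogy with test_main_account_limit: a Balance sheet
        # plus one dedicated sheet for the requested account.
        assert get_sheet_names(ods) == ['Balance', 'Expenses:Payroll']
        # The fixture has no payroll postings, so the sheet should only hold
        # the empty account section that check_in_report verifies.
        ExpectedPostings.check_in_report(ods, 'Expenses:Payroll',
                                         START_DATE, STOP_DATE)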