Changeset - 8b8bdc022508
Brett Smith - 2020-06-17 02:41:13
brettcsmith@brettcsmith.org
reports: Add BaseODS.column_style() method.

Use this to provide more consistent column styles throughout the reports.
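
For example (an illustrative sketch, not text from the changeset itself), a
report class can now ask for a column style by width when it builds a sheet:

    # width may be a bare number (treated as inches) or a string with a unit
    self.sheet.addElement(
        odf.table.TableColumn(stylename=self.column_style(1.5)))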
5 files changed with 102 insertions and 39 deletions:
conservancy_beancount/reports/accrual.py
 
#!/usr/bin/env python3
 
"""accrual-report - Status reports for accruals
 

	
 
accrual-report checks accruals (postings under Assets:Receivable and
 
Liabilities:Payable) for errors and metadata consistency, and reports any
 
problems on stderr. Then it writes a report about the status of those
 
accruals.
 

	
 
If you run it with no arguments, it will generate an aging report in ODS format
 
in the current directory.
 

	
 
Otherwise, the typical way to run it is to pass an RT ticket number or
 
invoice link as an argument, to report about accruals that match those
 
criteria::
 

	
 
    # Report all accruals associated with RT#1230:
 
    accrual-report 1230
 
    # Report all accruals with the invoice link rt:45/670.
 
    accrual-report 45/670
 
    # Report all accruals with the invoice link Invoice980.pdf.
 
    accrual-report Invoice980.pdf
 

	
 
By default, to stay fast, accrual-report only looks for postings from the
 
beginning of the last fiscal year. You can search further back in history
 
by passing the ``--since`` argument. The argument can be a fiscal year, or
 
a negative number indicating how many years back to search::
 

	
 
    # Search for accruals since 2016
 
    accrual-report --since 2016 [search terms …]
 
    # Search for accruals from the beginning of three fiscal years ago
 
    accrual-report --since -3 [search terms …]
 

	
 
If you want to further limit what accruals are reported, you can match on
 
other metadata by passing additional arguments in ``name=value`` format.
 
You can pass any number of search terms. For example::
 

	
 
    # Report accruals associated with RT#1230 and Jane Doe
 
    accrual-report 1230 entity=Doe-Jane
 

	
 
accrual-report will automatically decide what kind of report to generate
 
from the search terms you provide and the results they return. If you pass
 
no search terms, it generates an aging report. If your search terms match a
 
single outstanding payable, it writes an outgoing approval report.
 
Otherwise, it writes a basic balance report. You can specify what report
 
type you want with the ``--report-type`` option::
 

	
 
    # Write an outgoing approval report for all outstanding accruals for
 
    # Jane Doe, even if there's more than one
 
    accrual-report --report-type outgoing entity=Doe-Jane
 
    # Write an aging report for a specific project
 
    accrual-report --report-type aging project=ProjectName
 
"""
 
# Copyright © 2020  Brett Smith
 
#
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU Affero General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU Affero General Public License for more details.
 
#
 
# You should have received a copy of the GNU Affero General Public License
 
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
 

	
 
import argparse
 
import collections
 
import datetime
 
import enum
 
import logging
 
import sys
 

	
 
from pathlib import Path
 

	
 
from typing import (
 
    cast,
 
    Any,
 
    BinaryIO,
 
    Callable,
 
    Iterable,
 
    Iterator,
 
    List,
 
    Mapping,
 
    NamedTuple,
 
    Optional,
 
    Sequence,
 
    Set,
 
    TextIO,
 
    Tuple,
 
    TypeVar,
 
    Union,
 
)
 
from ..beancount_types import (
 
    Entries,
 
    Error,
 
    Errors,
 
    MetaKey,
 
    MetaValue,
 
    Transaction,
 
)
 

	
 
import odf.style  # type:ignore[import]
 
import odf.table  # type:ignore[import]
 
import rt
 

	
 
from beancount.parser import printer as bc_printer
 

	
 
from . import core
 
from .. import books
 
from .. import cliutil
 
from .. import config as configmod
 
from .. import data
 
from .. import filters
 
from .. import rtutil
 

	
 
PROGNAME = 'accrual-report'
 

	
 
PostGroups = Mapping[Optional[str], 'AccrualPostings']
 
T = TypeVar('T')
 

	
 
logger = logging.getLogger('conservancy_beancount.reports.accrual')
 

	
 
class Sentinel:
 
    pass
 

	
 

	
 
class Account(NamedTuple):
 
    name: str
 
    aging_thresholds: Sequence[int]
 

	
 

	
 
class AccrualAccount(enum.Enum):
 
    # Note the aging report uses the same order in which accounts are defined here.
 
    # See AgingODS.start_spreadsheet().
 
    RECEIVABLE = Account('Assets:Receivable', [365, 120, 90, 60])
 
    PAYABLE = Account('Liabilities:Payable', [365, 90, 60, 30])
 

	
 
    @classmethod
 
    def account_names(cls) -> Iterator[str]:
 
        return (acct.value.name for acct in cls)
 

	
 
    @classmethod
 
    def by_account(cls, name: data.Account) -> 'AccrualAccount':
 
        for account in cls:
 
            if name.is_under(account.value.name):
 
                return account
 
        raise ValueError(f"unrecognized account {name!r}")
 

	
 
    @classmethod
 
    def classify(cls, related: core.RelatedPostings) -> 'AccrualAccount':
 
        for account in cls:
 
            account_name = account.value.name
 
            if all(post.account.is_under(account_name) for post in related):
 
                return account
 
        raise ValueError("unrecognized account set in related postings")
 

	
 
    @property
 
    def normalize_amount(self) -> Callable[[T], T]:
 
        return core.normalize_amount_func(self.value.name)
 

	
 

	
 
class AccrualPostings(core.RelatedPostings):
 
    __slots__ = (
 
        'accrual_type',
 
        'end_balance',
 
        'account',
 
        'entity',
 
        'invoice',
 
    )
 
    INCONSISTENT = Sentinel()
 

	
 
    def __init__(self,
 
                 source: Iterable[data.Posting]=(),
 
                 *,
 
                 _can_own: bool=False,
 
    ) -> None:
 
        super().__init__(source, _can_own=_can_own)
 
        self.account = self._single_item(post.account for post in self)
 
        if isinstance(self.account, Sentinel):
 
            self.accrual_type: Optional[AccrualAccount] = None
 
            norm_func: Callable[[T], T] = lambda x: x
 
            entity_pred: Callable[[data.Posting], bool] = bool
 
        else:
 
            self.accrual_type = AccrualAccount.by_account(self.account)
 
            norm_func = self.accrual_type.normalize_amount
 
            entity_pred = lambda post: norm_func(post.units).number > 0
 
        self.entity = self._single_item(self.entities(entity_pred))
 
        self.invoice = self._single_item(self.first_meta_links('invoice', None))
 
        self.end_balance = norm_func(self.balance_at_cost())
 

	
 
    def _single_item(self, seq: Iterable[T]) -> Union[T, Sentinel]:
 
        items = iter(seq)
 
        try:
 
            item1 = next(items)
 
        except StopIteration:
 
            all_same = False
 
        else:
 
            all_same = all(item == item1 for item in items)
 
        return item1 if all_same else self.INCONSISTENT
 

	
 
    def entities(self, pred: Callable[[data.Posting], bool]=bool) -> Iterator[MetaValue]:
 
        return filters.iter_unique(
 
            post.meta['entity']
 
            for post in self
 
            if pred(post) and 'entity' in post.meta
 
        )
 

	
 
    def make_consistent(self) -> Iterator[Tuple[str, 'AccrualPostings']]:
 
        account_ok = isinstance(self.account, str)
 
        entity_ok = isinstance(self.entity, str)
 
        # `'/' in self.invoice` is just our heuristic to ensure that the
 
        # invoice metadata is "unique enough," and not just a placeholder
 
        # value like "FIXME". It can be refined if needed.
 
        invoice_ok = isinstance(self.invoice, str) and '/' in self.invoice
 
        if account_ok and entity_ok and invoice_ok:
 
            # type ignore for <https://github.com/python/mypy/issues/6670>
 
            yield (self.invoice, self)  # type:ignore[misc]
 
            return
 
        groups = collections.defaultdict(list)
 
        for post in self:
 
            post_invoice = self.invoice if invoice_ok else (
 
                post.meta.get('invoice') or 'BlankInvoice'
 
            )
 
            post_entity = self.entity if entity_ok else (
 
                post.meta.get('entity') or 'BlankEntity'
 
            )
 
            groups[f'{post.account} {post_invoice} {post_entity}'].append(post)
 
        type_self = type(self)
 
        for group_key, posts in groups.items():
 
            yield group_key, type_self(posts, _can_own=True)
 

	
 
    def is_paid(self, default: Optional[bool]=None) -> Optional[bool]:
 
        if self.accrual_type is None:
 
            return default
 
        else:
 
            return self.end_balance.le_zero()
 

	
 
    def is_zero(self, default: Optional[bool]=None) -> Optional[bool]:
 
        if self.accrual_type is None:
 
            return default
 
        else:
 
            return self.end_balance.is_zero()
 

	
 
    def since_last_nonzero(self) -> 'AccrualPostings':
 
        for index, (post, balance) in enumerate(self.iter_with_balance()):
 
            if balance.is_zero():
 
                start_index = index
 
        try:
 
            empty = start_index == index
 
        except NameError:
 
            empty = True
 
        return self if empty else self[start_index + 1:]
 

	
 
    @property
 
    def rt_id(self) -> Union[str, None, Sentinel]:
 
        return self._single_item(self.first_meta_links('rt-id', None))
 

	
 

	
 
class BaseReport:
 
    def __init__(self, out_file: TextIO) -> None:
 
        self.out_file = out_file
 
        self.logger = logger.getChild(type(self).__name__)
 

	
 
    def _report(self, posts: AccrualPostings, index: int) -> Iterable[str]:
 
        raise NotImplementedError("BaseReport._report")
 

	
 
    def run(self, groups: PostGroups) -> None:
 
        for index, invoice in enumerate(groups):
 
            for line in self._report(groups[invoice], index):
 
                print(line, file=self.out_file)
 

	
 

	
 
class AgingODS(core.BaseODS[AccrualPostings, Optional[data.Account]]):
 
    DOC_COLUMNS = [
 
        'rt-id',
 
        'invoice',
 
        'approval',
 
        'contract',
 
        'purchase-order',
 
    ]
 
    COLUMNS = [
 
        'Date',
 
        data.Metadata.human_name('entity'),
 
        'Invoice Amount',
 
        'Booked Amount',
 
        data.Metadata.human_name('project'),
 
        *(data.Metadata.human_name(key) for key in DOC_COLUMNS),
 
    ]
 
    COL_COUNT = len(COLUMNS)
 

	
 
    def __init__(self,
 
                 rt_wrapper: rtutil.RT,
 
                 date: datetime.date,
 
                 logger: logging.Logger,
 
    ) -> None:
 
        super().__init__(rt_wrapper)
 
        self.date = date
 
        self.logger = logger
 

	
 
    def section_key(self, row: AccrualPostings) -> Optional[data.Account]:
 
        if isinstance(row.account, str):
 
            return row.account
 
        else:
 
            return None
 

	
 
    def start_spreadsheet(self) -> None:
 
        for accrual_type in AccrualAccount:
 
            self.use_sheet(accrual_type.name.title())
 
            for index in range(self.COL_COUNT):
 
                stylename = self.style_col1_25 if index else ''
 
                self.sheet.addElement(odf.table.TableColumn(stylename=stylename))
 
                if index == 0:
 
                    style: Union[str, odf.style.Style] = ''
 
                elif index < 6:
 
                    style = self.column_style(1.2)
 
                else:
 
                    style = self.column_style(1.5)
 
                self.sheet.addElement(odf.table.TableColumn(stylename=style))
 
            self.add_row(*(
 
                self.string_cell(name, stylename=self.style_bold)
 
                for name in self.COLUMNS
 
            ))
 
            self.lock_first_row()
 

	
 
    def start_section(self, key: Optional[data.Account]) -> None:
 
        if key is None:
 
            return
 
        self.age_thresholds = list(AccrualAccount.by_account(key).value.aging_thresholds)
 
        self.age_balances = [core.MutableBalance() for _ in self.age_thresholds]
 
        accrual_date = self.date - datetime.timedelta(days=self.age_thresholds[-1])
 
        acct_parts = key.slice_parts()
 
        self.use_sheet(acct_parts[1])
 
        self.add_row()
 
        self.add_row(self.string_cell(
 
            f"{' '.join(acct_parts[2:])} {acct_parts[1]} Aging Report"
 
            f" Accrued by {accrual_date.isoformat()} Unpaid by {self.date.isoformat()}",
 
            stylename=self.merge_styles(self.style_bold, self.style_centertext),
 
            numbercolumnsspanned=self.COL_COUNT,
 
        ))
 
        self.add_row()
 

	
 
    def end_section(self, key: Optional[data.Account]) -> None:
 
        if key is None:
 
            return
 
        total_balance = core.MutableBalance()
 
        text_style = self.merge_styles(self.style_bold, self.style_endtext)
 
        text_span = 4
 
        last_age_text: Optional[str] = None
 
        self.add_row()
 
        for threshold, balance in zip(self.age_thresholds, self.age_balances):
 
            years, days = divmod(threshold, 365)
 
            years_text = f"{years} {'Year' if years == 1 else 'Years'}"
 
            days_text = f"{days} Days"
 
            if years and days:
 
                age_text = f"{years_text} {days_text}"
 
            elif years:
 
                age_text = years_text
 
            else:
 
                age_text = days_text
 
            if last_age_text is None:
 
                age_range = f"Over {age_text}"
 
            else:
 
                age_range = f"{age_text}–{last_age_text}"
 
            self.add_row(
 
                self.string_cell(
 
                    f"Total Aged {age_range}: ",
 
                    stylename=text_style,
 
                    numbercolumnsspanned=text_span,
 
                ),
 
                *(odf.table.TableCell() for _ in range(1, text_span)),
 
                self.balance_cell(balance),
 
            )
 
            last_age_text = age_text
 
            total_balance += balance
 
        self.add_row(
 
            self.string_cell(
 
                "Total Unpaid: ",
 
                stylename=text_style,
 
                numbercolumnsspanned=text_span,
 
            ),
 
            *(odf.table.TableCell() for _ in range(1, text_span)),
 
            self.balance_cell(total_balance),
 
        )
 

	
 
    def write_row(self, row: AccrualPostings) -> None:
 
        age = (self.date - row[0].meta.date).days
 
        if row.end_balance.ge_zero():
 
            for index, threshold in enumerate(self.age_thresholds):
 
                if age >= threshold:
 
                    self.age_balances[index] += row.end_balance
 
                    break
 
            else:
 
                return
 
        raw_balance = row.balance()
 
        if row.accrual_type is not None:
 
            raw_balance = row.accrual_type.normalize_amount(raw_balance)
 
        if raw_balance == row.end_balance:
 
            amount_cell = odf.table.TableCell()
 
        else:
 
            amount_cell = self.balance_cell(raw_balance)
 
        projects = {post.meta.get('project') or None for post in row}
 
        projects.discard(None)
 
        self.add_row(
 
            self.date_cell(row[0].meta.date),
 
            self.multiline_cell(row.entities()),
 
            amount_cell,
 
            self.balance_cell(row.end_balance),
 
            self.multiline_cell(sorted(projects)),
 
            *(self.meta_links_cell(row.all_meta_links(key))
 
              for key in self.DOC_COLUMNS),
 
        )
 

	
 

	
 
class AgingReport(BaseReport):
 
    def __init__(self,
 
                 rt_wrapper: rtutil.RT,
 
                 out_file: BinaryIO,
 
                 date: Optional[datetime.date]=None,
 
    ) -> None:
 
        if date is None:
 
            date = datetime.date.today()
 
        self.out_bin = out_file
 
        self.logger = logger.getChild(type(self).__name__)
 
        self.ods = AgingODS(rt_wrapper, date, self.logger)
 

	
 
    def run(self, groups: PostGroups) -> None:
 
        rows: List[AccrualPostings] = []
 
        for group in groups.values():
 
            if group.is_zero():
 
                # Cheap optimization: don't slice and dice groups we're not
 
                # going to report anyway.
 
                continue
 
            elif group.accrual_type is None:
 
                group = group.since_last_nonzero()
 
            else:
 
                # Filter out accruals newer than the aging cutoff date,
                # e.g., when the same invoice has multiple postings over
                # time and we don't want to report the too-recent ones.
 
                cutoff_date = self.ods.date - datetime.timedelta(
 
                    days=group.accrual_type.value.aging_thresholds[-1],
 
                )
 
                group = AccrualPostings(
 
                    post for post in group.since_last_nonzero()
 
                    if post.meta.date <= cutoff_date
 
                    or group.accrual_type.normalize_amount(post.units.number) < 0
 
                )
 
            if group and not group.is_zero():
 
                rows.append(group)
 
        rows.sort(key=lambda related: (
 
            related.account,
 
            related[0].meta.date,
 
            ('\0'.join(related.entities())
 
             if related.entity is related.INCONSISTENT
 
             else related.entity),
 
        ))
 
        self.ods.write(rows)
 
        self.ods.save_file(self.out_bin)
 

	
 

	
 
class BalanceReport(BaseReport):
 
    def _report(self, posts: AccrualPostings, index: int) -> Iterable[str]:
 
        posts = posts.since_last_nonzero()
 
        date_s = posts[0].meta.date.strftime('%Y-%m-%d')
 
        if index:
 
            yield ""
 
        yield f"{posts.invoice}:"
 
        yield f"  {posts.balance_at_cost()} outstanding since {date_s}"
 

	
 

	
 
class OutgoingReport(BaseReport):
 
    def __init__(self, rt_wrapper: rtutil.RT, out_file: TextIO) -> None:
 
        super().__init__(out_file)
 
        self.rt_wrapper = rt_wrapper
 
        self.rt_client = rt_wrapper.rt
 

	
 
    def _primary_rt_id(self, posts: AccrualPostings) -> rtutil.TicketAttachmentIds:
 
        rt_id = posts.rt_id
 
        if rt_id is None:
 
            raise ValueError("no rt-id links found")
 
        elif isinstance(rt_id, Sentinel):
 
            raise ValueError("multiple rt-id links found")
 
        parsed = rtutil.RT.parse(rt_id)
 
        if parsed is None:
 
            raise ValueError("rt-id is not a valid RT reference")
 
        else:
 
            return parsed
 

	
 
    def _report(self, posts: AccrualPostings, index: int) -> Iterable[str]:
 
        posts = posts.since_last_nonzero()
 
        try:
 
            ticket_id, _ = self._primary_rt_id(posts)
 
            ticket = self.rt_client.get_ticket(ticket_id)
 
            # Note we only use this when ticket is None.
 
            errmsg = f"ticket {ticket_id} not found"
 
        except (ValueError, rt.RtError) as error:
 
            ticket = None
 
            errmsg = error.args[0]
 
        if ticket is None:
 
            self.logger.error(
 
                "can't generate outgoings report for %s because no RT ticket available: %s",
 
                posts.invoice, errmsg,
 
            )
 
            return
 

	
 
        try:
 
            rt_requestor = self.rt_client.get_user(ticket['Requestors'][0])
 
        except (IndexError, rt.RtError):
 
            rt_requestor = None
 
        if rt_requestor is None:
 
            requestor = ''
 
            requestor_name = ''
 
        else:
 
            requestor_name = (
 
                rt_requestor.get('RealName')
 
                or ticket.get('CF.{payment-to}')
 
                or ''
 
            )
 
            requestor = f'{requestor_name} <{rt_requestor["EmailAddress"]}>'.strip()
 

	
 
        balance_s = posts.end_balance.format(None)
 
        raw_balance = -posts.balance()
 
        if raw_balance != posts.end_balance:
 
            balance_s = f'{raw_balance} ({balance_s})'
 

	
 
        contract_links = list(posts.all_meta_links('contract'))
 
        if contract_links:
 
            contract_s = ' , '.join(self.rt_wrapper.iter_urls(
 
                contract_links, missing_fmt='<BROKEN RT LINK: {}>',
 
            ))
 
        else:
 
            contract_s = "NO CONTRACT GOVERNS THIS TRANSACTION"
 
        projects = [v for v in posts.meta_values('project')
 
                    if isinstance(v, str)]
 

	
 
        yield "PAYMENT FOR APPROVAL:"
 
        yield f"REQUESTOR: {requestor}"
 
        yield f"TOTAL TO PAY: {balance_s}"
 
        yield f"AGREEMENT: {contract_s}"
 
        yield f"PAYMENT TO: {ticket.get('CF.{payment-to}') or requestor_name}"
 
        yield f"PAYMENT METHOD: {ticket.get('CF.{payment-method}', '')}"
 
        yield f"PROJECT: {', '.join(projects)}"
 
        yield "\nBEANCOUNT ENTRIES:\n"
 

	
 
        last_txn: Optional[Transaction] = None
 
        for post in posts:
 
            txn = post.meta.txn
 
            if txn is not last_txn:
 
                last_txn = txn
 
                txn = self.rt_wrapper.txn_with_urls(txn, '{}')
 
                yield bc_printer.format_entry(txn)
 

	
 

	
 
class ReportType(enum.Enum):
 
    AGING = AgingReport
 
    BALANCE = BalanceReport
 
    OUTGOING = OutgoingReport
 
    AGE = AGING
 
    BAL = BALANCE
 
    OUT = OUTGOING
 
    OUTGOINGS = OUTGOING
 

	
 
    @classmethod
 
    def by_name(cls, name: str) -> 'ReportType':
 
        try:
 
            return cls[name.upper()]
 
        except KeyError:
 
            raise ValueError(f"unknown report type {name!r}") from None
 

	
 

	
 
class ReturnFlag(enum.IntFlag):
 
    LOAD_ERRORS = 1
 
    # 2 was used in the past; it can probably be reclaimed.
 
    REPORT_ERRORS = 4
 
    NOTHING_TO_REPORT = 8
 

	
 

	
 
def filter_search(postings: Iterable[data.Posting],
 
                  search_terms: Iterable[cliutil.SearchTerm],
 
) -> Iterable[data.Posting]:
 
    accounts = tuple(AccrualAccount.account_names())
 
    postings = (post for post in postings if post.account.is_under(*accounts))
 
    for query in search_terms:
 
        postings = query.filter_postings(postings)
 
    return postings
 

	
 
def parse_arguments(arglist: Optional[Sequence[str]]=None) -> argparse.Namespace:
 
    parser = argparse.ArgumentParser(prog=PROGNAME)
 
    cliutil.add_version_argument(parser)
 
    parser.add_argument(
 
        '--report-type', '-t',
 
        metavar='NAME',
 
        type=ReportType.by_name,
 
        help="""The type of report to generate, one of `aging`, `balance`, or
 
`outgoing`. If not specified, the default is `aging` when no search terms are
 
given, `outgoing` for search terms that return a single outstanding payable,
 
and `balance` any other time.
 
""")
 
    parser.add_argument(
 
        '--since',
 
        metavar='YEAR',
 
        type=int,
 
        default=-1,
 
        help="""How far back to search the books for related transactions.
 
You can either specify a fiscal year, or a negative offset from the current
 
fiscal year, to start loading entries from. The default is -1 (start from the
 
previous fiscal year).
 
""")
 
    parser.add_argument(
 
        '--output-file', '-O',
 
        metavar='PATH',
 
        type=Path,
 
        help="""Write the report to this file, or stdout when PATH is `-`.
 
The default is stdout for the balance and outgoing reports, and a generated
 
filename for other reports.
 
""")
 
    cliutil.add_loglevel_argument(parser)
 
    parser.add_argument(
 
        'search_terms',
 
        metavar='FILTER',
 
        type=cliutil.SearchTerm.arg_parser('invoice', 'rt-id'),
 
        nargs=argparse.ZERO_OR_MORE,
 
        help="""Report on accruals that match this criteria. The format is
 
NAME=TERM. TERM is a link or word that must exist in a posting's NAME
 
metadata to match. A single ticket number is a shortcut for
 
`rt-id=rt:NUMBER`. Any other link, including an RT attachment link in
 
`TIK/ATT` format, is a shortcut for `invoice=LINK`.
 
""")
 
    args = parser.parse_args(arglist)
 
    if args.report_type is None and not args.search_terms:
 
        args.report_type = ReportType.AGING
 
    return args
 

	
 
def main(arglist: Optional[Sequence[str]]=None,
 
         stdout: TextIO=sys.stdout,
 
         stderr: TextIO=sys.stderr,
 
         config: Optional[configmod.Config]=None,
 
) -> int:
 
    args = parse_arguments(arglist)
 
    cliutil.set_loglevel(logger, args.loglevel)
 
    if config is None:
 
        config = configmod.Config()
 
        config.load_file()
 

	
 
    returncode = 0
 
    books_loader = config.books_loader()
 
    if books_loader is None:
 
        entries, load_errors, _ = books.Loader.load_none(config.config_file_path())
 
    elif args.report_type is ReportType.AGING:
 
        entries, load_errors, _ = books_loader.load_all()
 
    else:
 
        entries, load_errors, _ = books_loader.load_all(args.since)
 
    filters.remove_opening_balance_txn(entries)
 
    for error in load_errors:
 
        bc_printer.print_error(error, file=stderr)
 
        returncode |= ReturnFlag.LOAD_ERRORS
 

	
 
    postings = list(filter_search(
 
        data.Posting.from_entries(entries), args.search_terms,
 
    ))
 
    if not postings:
 
        logger.warning("no matching entries found to report")
 
        returncode |= ReturnFlag.NOTHING_TO_REPORT
 
    # groups is a mapping of metadata value strings to AccrualPostings.
 
    # The keys are basically arbitrary; the report classes don't rely on them,
    # but they do help identify what's being grouped.
 
    # For the outgoing approval report, groups maps rt-id link strings to
 
    # associated accruals.
 
    # For all other reports, groups starts by grouping postings together by
 
    # invoice link string, then uses AccrualReport.make_consistent() to split
 
    # out groups that need it.
 
    groups: PostGroups
 
    if args.report_type is None or args.report_type is ReportType.OUTGOING:
 
        groups = dict(AccrualPostings.group_by_first_meta_link(postings, 'rt-id'))
 
        if (args.report_type is None
 
            and len(groups) == 1
 
            and all(group.accrual_type is AccrualAccount.PAYABLE
 
                    and not group.is_paid()
 
                    and key  # Make sure we have a usable rt-id
 
                    for key, group in groups.items())
 
        ):
 
            args.report_type = ReportType.OUTGOING
 
    if args.report_type is not ReportType.OUTGOING:
 
        groups = {
 
            key: group
 
            for _, source in AccrualPostings.group_by_first_meta_link(postings, 'invoice')
 
            for key, group in source.make_consistent()
 
        }
 
    if args.report_type is not ReportType.AGING:
 
        groups = {
 
            key: posts for key, posts in groups.items() if not posts.is_paid()
 
        } or groups
 
    del postings
 

	
 
    report: Optional[BaseReport] = None
 
    output_path: Optional[Path] = None
 
    if args.report_type is ReportType.AGING:
 
        rt_wrapper = config.rt_wrapper()
 
        if rt_wrapper is None:
 
            logger.error("unable to generate aging report: RT client is required")
 
        else:
 
            now = datetime.datetime.now()
conservancy_beancount/reports/core.py
...
 
@@ -82,955 +82,979 @@ class Balance(Mapping[str, data.Amount]):
 

	
 
    Each key is a Beancount currency string, and each value represents the
 
    balance in that currency.
 
    """
 
    __slots__ = ('_currency_map', 'tolerance')
 
    TOLERANCE = Decimal('0.01')
 

	
 
    def __init__(self,
 
                 source: Iterable[data.Amount]=(),
 
                 tolerance: Optional[Decimal]=None,
 
    ) -> None:
 
        if tolerance is None:
 
            tolerance = self.TOLERANCE
 
        self.tolerance = tolerance
 
        self._currency_map: Dict[str, data.Amount] = {}
 
        for amount in source:
 
            self._add_amount(self._currency_map, amount)
 

	
 
    def _add_amount(self,
 
                    currency_map: MutableMapping[str, data.Amount],
 
                    amount: data.Amount,
 
    ) -> None:
 
        code = amount.currency
 
        try:
 
            current_number = currency_map[code].number
 
        except KeyError:
 
            current_number = Decimal(0)
 
        currency_map[code] = data.Amount(current_number + amount.number, code)
 

	
 
    def _add_other(self,
 
                   currency_map: MutableMapping[str, data.Amount],
 
                   other: Union[data.Amount, 'Balance'],
 
    ) -> None:
 
        if isinstance(other, Balance):
 
            for amount in other.values():
 
                self._add_amount(currency_map, amount)
 
        else:
 
            self._add_amount(currency_map, other)
 

	
 
    def __repr__(self) -> str:
 
        values = [repr(amt) for amt in self.values()]
 
        return f"{type(self).__name__}({values!r})"
 

	
 
    def __str__(self) -> str:
 
        return self.format()
 

	
 
    def __abs__(self: BalanceType) -> BalanceType:
 
        return type(self)(bc_amount.abs(amt) for amt in self.values())
 

	
 
    def __add__(self: BalanceType, other: Union[data.Amount, 'Balance']) -> BalanceType:
 
        retval_map = self._currency_map.copy()
 
        self._add_other(retval_map, other)
 
        return type(self)(retval_map.values())
 

	
 
    def __sub__(self: BalanceType, other: Union[data.Amount, 'Balance']) -> BalanceType:
 
        return self.__add__(-other)
 

	
 
    def __eq__(self, other: Any) -> bool:
 
        if isinstance(other, Balance):
 
            clean_self = self.clean_copy()
 
            clean_other = other.clean_copy()
 
            return len(clean_self) == len(clean_other) and all(
 
                clean_self[key] == clean_other.get(key) for key in clean_self
 
            )
 
        else:
 
            return super().__eq__(other)
 

	
 
    def __neg__(self: BalanceType) -> BalanceType:
 
        return type(self)(-amt for amt in self.values())
 

	
 
    def __pos__(self: BalanceType) -> BalanceType:
 
        return self
 

	
 
    def __getitem__(self, key: str) -> data.Amount:
 
        return self._currency_map[key]
 

	
 
    def __iter__(self) -> Iterator[str]:
 
        return iter(self._currency_map)
 

	
 
    def __len__(self) -> int:
 
        return len(self._currency_map)
 

	
 
    def _all_amounts(self,
 
                     op_func: Callable[[DecimalCompat, DecimalCompat], bool],
 
                     operand: DecimalCompat,
 
    ) -> bool:
 
        return all(op_func(amt.number, operand) for amt in self.values())
 

	
 
    def copy(self: BalanceType) -> BalanceType:
 
        return type(self)(self.values())
 

	
 
    def clean_copy(self: BalanceType, tolerance: Optional[Decimal]=None) -> BalanceType:
 
        if tolerance is None:
 
            tolerance = self.tolerance
 
        return type(self)(
 
            amount for amount in self.values()
 
            if abs(amount.number) >= tolerance
 
        )
 

	
 
    @staticmethod
 
    def within_tolerance(dec: DecimalCompat, tolerance: DecimalCompat) -> bool:
 
        dec = cast(Decimal, dec)
 
        return abs(dec) < tolerance
 

	
 
    def eq_zero(self) -> bool:
 
        """Returns true if all amounts in the balance == 0, within tolerance."""
 
        return self._all_amounts(self.within_tolerance, self.tolerance)
 

	
 
    is_zero = eq_zero
 

	
 
    def ge_zero(self) -> bool:
 
        """Returns true if all amounts in the balance >= 0, within tolerance."""
 
        op_func = operator.gt if self.tolerance else operator.ge
 
        return self._all_amounts(op_func, -self.tolerance)
 

	
 
    def le_zero(self) -> bool:
 
        """Returns true if all amounts in the balance <= 0, within tolerance."""
 
        op_func = operator.lt if self.tolerance else operator.le
 
        return self._all_amounts(op_func, self.tolerance)
 

	
 
    def format(self,
 
               fmt: Optional[str]='#,#00.00 ¤¤',
 
               sep: str=', ',
 
               empty: str="Zero balance",
 
               tolerance: Optional[Decimal]=None,
 
    ) -> str:
 
        """Formats the balance as a string with the given parameters
 

	
 
        If the balance is zero (within tolerance), returns ``empty``.
 
        Otherwise, returns a string with each amount in the balance formatted
 
        as ``fmt``, separated by ``sep``.
 

	
 
        If you set ``fmt`` to None, amounts will be formatted according to the
 
        user's locale. The default format is Beancount's input format.
 
        """
 
        amounts = list(self.clean_copy(tolerance).values())
 
        if not amounts:
 
            return empty
 
        amounts.sort(key=lambda amt: abs(amt.number), reverse=True)
 
        return sep.join(
 
            babel.numbers.format_currency(amt.number, amt.currency, fmt)
 
            for amt in amounts
 
        )
 

	
 

	
 
class MutableBalance(Balance):
 
    __slots__ = ()
 

	
 
    def __iadd__(self: BalanceType, other: Union[data.Amount, Balance]) -> BalanceType:
 
        self._add_other(self._currency_map, other)
 
        return self
 

	
 
    def __isub__(self: BalanceType, other: Union[data.Amount, Balance]) -> BalanceType:
 
        self._add_other(self._currency_map, -other)
 
        return self
 

	
 

	
 
class RelatedPostings(Sequence[data.Posting]):
 
    """Collect and query related postings
 

	
 
    This class provides common functionality for collecting related postings
 
    and running queries on them: iterating over them, tallying their balance,
 
    etc.
 

	
 
    This class doesn't know anything about how the postings are related. That's
 
    entirely up to the caller.
 

	
 
    A common pattern is to use this class with collections.defaultdict
 
    to organize postings based on some key. See the group_by_meta classmethod
 
    for an example.
 
    """
 
    __slots__ = ('_postings',)
 

	
 
    def __init__(self,
 
                 source: Iterable[data.Posting]=(),
 
                 *,
 
                 _can_own: bool=False,
 
    ) -> None:
 
        self._postings: List[data.Posting]
 
        if _can_own and isinstance(source, list):
 
            self._postings = source
 
        else:
 
            self._postings = list(source)
 

	
 
    @classmethod
 
    def _group_by(cls: Type[RelatedType],
 
                  postings: Iterable[data.Posting],
 
                  key: Callable[[data.Posting], T],
 
    ) -> Iterator[Tuple[T, RelatedType]]:
 
        mapping: Dict[T, List[data.Posting]] = collections.defaultdict(list)
 
        for post in postings:
 
            mapping[key(post)].append(post)
 
        for value, posts in mapping.items():
 
            yield value, cls(posts, _can_own=True)
 

	
 
    @classmethod
 
    def group_by_account(cls: Type[RelatedType],
 
                         postings: Iterable[data.Posting],
 
    ) -> Iterator[Tuple[data.Account, RelatedType]]:
 
        return cls._group_by(postings, operator.attrgetter('account'))
 

	
 
    @classmethod
 
    def group_by_meta(cls: Type[RelatedType],
 
                      postings: Iterable[data.Posting],
 
                      key: MetaKey,
 
                      default: Optional[MetaValue]=None,
 
    ) -> Iterator[Tuple[Optional[MetaValue], RelatedType]]:
 
        """Relate postings by metadata value
 

	
 
        This method takes an iterable of postings and yields key/group pairs.
        Each key is a value of post.meta.get(key, default), and each group is
        a RelatedPostings instance containing all the postings that had that
        same metadata value.
 
        """
 
        def key_func(post: data.Posting) -> Optional[MetaValue]:
 
            return post.meta.get(key, default)
 
        return cls._group_by(postings, key_func)
 

	
 
    @classmethod
 
    def group_by_first_meta_link(
 
            cls: Type[RelatedType],
 
            postings: Iterable[data.Posting],
 
            key: MetaKey,
 
    ) -> Iterator[Tuple[Optional[str], RelatedType]]:
 
        """Relate postings by the first link in metadata
 

	
 
        This method takes an iterable of postings and yields key/group pairs.
        Each key is a value of post.meta.first_link(key, None), and each
        group is a RelatedPostings instance containing all the postings that
        had that same first metadata link.
 
        """
 
        def key_func(post: data.Posting) -> Optional[MetaValue]:
 
            return post.meta.first_link(key, None)
 
        return cls._group_by(postings, key_func)
 

	
 
    def __repr__(self) -> str:
 
        return f'<{type(self).__name__} {self._postings!r}>'
 

	
 
    @overload
 
    def __getitem__(self: RelatedType, index: int) -> data.Posting: ...
 

	
 
    @overload
 
    def __getitem__(self: RelatedType, s: slice) -> RelatedType: ...
 

	
 
    def __getitem__(self: RelatedType,
 
                    index: Union[int, slice],
 
    ) -> Union[data.Posting, RelatedType]:
 
        if isinstance(index, slice):
 
            return type(self)(self._postings[index], _can_own=True)
 
        else:
 
            return self._postings[index]
 

	
 
    def __len__(self) -> int:
 
        return len(self._postings)
 

	
 
    def all_meta_links(self, key: MetaKey) -> Iterator[str]:
 
        return filters.iter_unique(
 
            link for post in self for link in post.meta.report_links(key)
 
        )
 

	
 
    @overload
 
    def first_meta_links(self, key: MetaKey, default: str='') -> Iterator[str]: ...
 

	
 
    @overload
 
    def first_meta_links(self, key: MetaKey, default: None) -> Iterator[Optional[str]]: ...
 

	
 
    def first_meta_links(self,
 
                         key: MetaKey,
 
                         default: Optional[str]='',
 
    ) -> Iterator[Optional[str]]:
 
        retval = filters.iter_unique(
 
            post.meta.first_link(key, default) for post in self
 
        )
 
        if default == '':
 
            retval = (s for s in retval if s)
 
        return retval
 

	
 
    def iter_with_balance(self) -> Iterator[Tuple[data.Posting, Balance]]:
 
        balance = MutableBalance()
 
        for post in self:
 
            balance += post.units
 
            yield post, balance
 

	
 
    def balance(self) -> Balance:
 
        return Balance(post.units for post in self)
 

	
 
    def balance_at_cost(self) -> Balance:
 
        return Balance(post.at_cost() for post in self)
 

	
 
    def balance_at_cost_by_date(self, date: datetime.date) -> Balance:
 
        for index, post in enumerate(self):
 
            if post.meta.date >= date:
 
                break
 
        else:
 
            index += 1
 
        return Balance(post.at_cost() for post in self._postings[:index])
 

	
 
    def meta_values(self,
 
                    key: MetaKey,
 
                    default: Optional[MetaValue]=None,
 
    ) -> Set[Optional[MetaValue]]:
 
        return {post.meta.get(key, default) for post in self}
 

	
 

	
 
class BaseSpreadsheet(Generic[RT, ST], metaclass=abc.ABCMeta):
 
    """Abstract base class to help write spreadsheets
 

	
 
    This class provides the very core logic to write an arbitrary set of data
 
    rows to arbitrary output. It calls hooks when it starts writing the
 
    spreadsheet, starts a new "section" of rows, ends a section, and ends the
 
    spreadsheet.
 

	
 
    RT is the type of the input data rows. ST is the type of the section
 
    identifier that you create from each row. If you don't want to use the
 
    section logic at all, set ST to None and define section_key to return None.
 
    """
 

	
 
    @abc.abstractmethod
 
    def section_key(self, row: RT) -> ST:
 
        """Return the section a row belongs to
 

	
 
        Given a data row, this method should return some identifier for the
 
        "section" the row belongs to. The write method uses this to
 
        determine when to call start_section and end_section.
 

	
 
        If your spreadsheet doesn't need sections, define this to return None.
 
        """
 
        ...
 

	
 
    @abc.abstractmethod
 
    def write_row(self, row: RT) -> None:
 
        """Write a data row to the output spreadsheet
 

	
 
        This method is called once for each data row in the input.
 
        """
 
        ...
 

	
 
    # The next four methods are all called by the write method at the points
    # their names describe. You may override them to output headers or sums,
    # record state, etc. The default implementations are all no-ops.
 

	
 
    def start_spreadsheet(self) -> None:
 
        pass
 

	
 
    def start_section(self, key: ST) -> None:
 
        pass
 

	
 
    def end_section(self, key: ST) -> None:
 
        pass
 

	
 
    def end_spreadsheet(self) -> None:
 
        pass
 

	
 
    def write(self, rows: Iterable[RT]) -> None:
 
        prev_section: Optional[ST] = None
 
        self.start_spreadsheet()
 
        for row in rows:
 
            section = self.section_key(row)
 
            if section != prev_section:
 
                if prev_section is not None:
 
                    self.end_section(prev_section)
 
                self.start_section(section)
 
                prev_section = section
 
            self.write_row(row)
 
        try:
 
            should_end = section is not None
 
        except NameError:
 
            should_end = False
 
        if should_end:
 
            self.end_section(section)
 
        self.end_spreadsheet()
 

	
 

	
 
class BaseODS(BaseSpreadsheet[RT, ST], metaclass=abc.ABCMeta):
 
    """Abstract base class to help write OpenDocument spreadsheets
 

	
 
    This class provides the very core logic to write an arbitrary set of data
 
    rows to an OpenDocument spreadsheet. It provides helper methods for
 
    building sheets, rows, and cells.
 

	
 
    See also the BaseSpreadsheet base class for additional documentation about
 
    methods you must and can define, the definition of RT and ST, etc.
 
    """
 
    # Defined in the XSL spec, "Definitions of Units of Measure"
 
    MEASUREMENT_UNITS = frozenset([
 
        'cm',
 
        'em',
 
        'in',
 
        'mm',
 
        'pc',
 
        'pt',
 
        'px',
 
    ])
 
    MEASUREMENT_RE = re.compile(
 
        r'([-+]?(?:\d+\.?|\.\d+|\d+\.\d+))({})'.format('|'.join(MEASUREMENT_UNITS)),
 
        re.ASCII,
 
    )
 

	
 
    def __init__(self, rt_wrapper: Optional[rtutil.RT]=None) -> None:
 
        self.rt_wrapper = rt_wrapper
 
        self.locale = babel.core.Locale.default('LC_MONETARY')
 
        self.currency_fmt_key = 'accounting'
 
        self._name_counter = itertools.count(1)
 
        self._currency_style_cache: MutableMapping[str, odf.style.Style] = {}
 
        self.document = odf.opendocument.OpenDocumentSpreadsheet()
 
        self.init_settings()
 
        self.init_styles()
 
        self.sheet = self.use_sheet("Report")
 

	
 
    ### Low-level document tree manipulation
 
    # The *intent* is that you only need to use these if you're adding new
 
    # methods to manipulate document settings or styles.
 

	
 
    def copy_element(self, elem: odf.element.Element) -> odf.element.Element:
 
        qattrs = dict(self.iter_qattributes(elem))
 
        retval = odf.element.Element(qname=elem.qname, qattributes=qattrs)
 
        try:
 
            orig_name = retval.getAttribute('name')
 
        except ValueError:
 
            orig_name = None
 
        if orig_name is not None:
 
            retval.setAttribute('name', f'{orig_name}{next(self._name_counter)}')
 
        return retval
 

	
 
    def ensure_child(self,
 
                     parent: odf.element.Element,
 
                     child_type: ElementType,
 
                     **kwargs: Any,
 
    ) -> odf.element.Element:
 
        new_child = child_type(**kwargs)
 
        found_child = self.find_child(parent, new_child)
 
        if found_child is None:
 
            parent.addElement(new_child)
 
            return parent.lastChild
 
        else:
 
            return found_child
 

	
 
    def ensure_config_map_entry(self,
 
                                root: odf.element.Element,
 
                                map_name: str,
 
                                entry_name: str,
 
    ) -> odf.element.Element:
 
        """Return a ``ConfigItemMapEntry`` under ``root``
 

	
 
        This method ensures there's a ``ConfigItemMapNamed`` named ``map_name``
 
        under ``root``, and a ``ConfigItemMapEntry`` named ``entry_name`` under
 
        that. Return the ``ConfigItemMapEntry`` element.
 
        """
 
        config_map = self.ensure_child(root, odf.config.ConfigItemMapNamed, name=map_name)
 
        return self.ensure_child(config_map, odf.config.ConfigItemMapEntry, name=entry_name)
 

	
 
    def find_child(self,
 
                   parent: odf.element.Element,
 
                   child: odf.element.Element,
 
    ) -> Optional[odf.element.Element]:
 
        attrs = {k: v for k, v in self.iter_attributes(child)}
 
        if not attrs:
 
            return None
 
        for elem in parent.childNodes:
 
            if (elem.qname == child.qname
 
                and all(elem.getAttribute(k) == v for k, v in attrs.items())):
 
                return elem
 
        return None
 

	
 
    def iter_attributes(self, elem: odf.element.Element) -> Iterator[Tuple[str, str]]:
 
        for (_, key), value in self.iter_qattributes(elem):
 
            yield key.lower().replace('-', ''), value
 

	
 
    def iter_qattributes(self, elem: odf.element.Element) -> Iterator[Tuple[Tuple[str, str], str]]:
 
        if elem.attributes:
 
            yield from elem.attributes.items()
 

	
 
    def replace_child(self,
 
                     parent: odf.element.Element,
 
                     child_type: ElementType,
 
                     **kwargs: Any,
 
    ) -> odf.element.Element:
 
        new_child = child_type(**kwargs)
 
        found_child = self.find_child(parent, new_child)
 
        parent.insertBefore(new_child, found_child)
 
        if found_child is not None:
 
            parent.removeChild(found_child)
 
        return new_child
 

	
 
    def set_config(self,
 
                   root: odf.element.Element,
 
                   name: str,
 
                   value: Union[bool, int, str],
 
                   config_type: Optional[str]=None,
 
    ) -> None:
 
        """Ensure ``root`` has a ``ConfigItem`` with the given name, type, and value"""
 
        value_s = str(value)
 
        if isinstance(value, bool):
 
            value_s = str(value).lower()
 
            default_type = 'boolean'
 
        elif isinstance(value, str):
 
            default_type = 'string'
 
        if config_type is None:
 
            try:
 
                config_type = default_type
 
            except NameError:
 
                raise ValueError(
 
                    f"need config_type for {type(value).__name__} value",
 
                ) from None
 
        item = self.replace_child(
 
            root, odf.config.ConfigItem, name=name, type=config_type,
 
        )
 
        item.addText(value_s)
 

	
 
    ### Styles
 

	
 
    def column_style(self, width: Union[float, str], **attrs: Any) -> odf.style.Style:
 
        if not isinstance(width, str) or (width and not width[-1].isalpha()):
 
            width = f'{width}in'
 
        match = self.MEASUREMENT_RE.fullmatch(width)
 
        if match is None:
 
            raise ValueError(f"invalid width {width!r}")
 
        width_float = float(match.group(1))
 
        if width_float <= 0:
 
            # Per the OpenDocument spec, column-width is a positiveLength.
 
            raise ValueError(f"width {width!r} must be positive")
 
        width = '{:.3g}{}'.format(width_float, match.group(2))
 
        retval = self.ensure_child(
 
            self.document.automaticstyles,
 
            odf.style.Style,
 
            name=f'col_{width.replace(".", "_")}'
 
        )
 
        retval.setAttribute('family', 'table-column')
 
        if retval.firstChild is None:
 
            retval.addElement(odf.style.TableColumnProperties(
 
                columnwidth=width, **attrs
 
            ))
 
        return retval
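    # Example use of column_style (mirrors AgingODS.start_spreadsheet in
    # accrual.py): a bare number is treated as inches, while a string such
    # as '2.5cm' keeps its explicit unit.
    #
    #     self.sheet.addElement(
    #         odf.table.TableColumn(stylename=self.column_style(1.5)))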
 

	
 
    def _build_currency_style(
 
            self,
 
            root: odf.element.Element,
 
            locale: babel.core.Locale,
 
            code: str,
 
            fmt_index: int,
 
            properties: Optional[odf.style.TextProperties]=None,
 
            *,
 
            fmt_key: Optional[str]=None,
 
            volatile: bool=False,
 
            minintegerdigits: int=1,
 
    ) -> odf.element.Element:
 
        if fmt_key is None:
 
            fmt_key = self.currency_fmt_key
 
        pattern = locale.currency_formats[fmt_key]
 
        fmts = pattern.pattern.split(';')
 
        try:
 
            fmt = fmts[fmt_index]
 
        except IndexError:
 
            fmt = fmts[0]
 
            grouping = pattern.grouping[0]
 
        else:
 
            grouping = pattern.grouping[fmt_index]
 
        zero_s = babel.numbers.format_currency(0, code, '##0.0', locale)
 
        try:
 
            decimal_index = zero_s.rindex('.') + 1
 
        except ValueError:
 
            decimalplaces = 0
 
        else:
 
            decimalplaces = len(zero_s) - decimal_index
 
        style = self.replace_child(
 
            root,
 
            odf.number.CurrencyStyle,
 
            name=f'{code}{next(self._name_counter)}',
 
        )
 
        style.setAttribute('volatile', 'true' if volatile else 'false')
 
        if properties is not None:
 
            style.addElement(properties)
 
        for part in re.split(r"(¤+|[#0,.]+|'[^']+')", fmt):
 
            if not part:
 
                pass
 
            elif not part.strip('#0,.'):
 
                style.addElement(odf.number.Number(
 
                    decimalplaces=str(decimalplaces),
 
                    grouping='true' if grouping else 'false',
 
                    minintegerdigits=str(minintegerdigits),
 
                ))
 
            elif part == '¤':
 
                style.addElement(odf.number.CurrencySymbol(
 
                    country=locale.territory,
 
                    language=locale.language,
 
                    text=babel.numbers.get_currency_symbol(code, locale),
 
                ))
 
            elif part == '¤¤':
 
                style.addElement(odf.number.Text(text=code))
 
            else:
 
                style.addElement(odf.number.Text(text=part.strip("'")))
 
        return style
 

	
 
    def currency_style(
 
            self,
 
            code: str,
 
            locale: Optional[babel.core.Locale]=None,
 
            negative_properties: Optional[odf.style.TextProperties]=None,
 
            positive_properties: Optional[odf.style.TextProperties]=None,
 
            root: odf.element.Element=None,
 
    ) -> odf.style.Style:
 
        """Create and return a spreadsheet style to format currency data
 

	
 
        Given a currency code and a locale, this method will create all the
 
        styles necessary to format the currency according to the locale's
 
        rules, including rendering of decimal points and negative values.
 

	
 
        You may optionally pass in TextProperties to use for negative and
 
        positive amounts, respectively. If you don't, negative values will
 
        automatically be rendered in red (text color #f00).
 

	
 
        Results are cached. If you repeatedly call this method with the same
 
        arguments, you'll keep getting the same style returned, which will
 
        only be added to the document once.
 
        """
 
        if locale is None:
 
            locale = self.locale
 
        if negative_properties is None:
 
            negative_properties = odf.style.TextProperties(color='#ff0000')
 
        if root is None:
 
            root = self.document.styles
 
        cache_parts = [str(id(root)), code, str(locale)]
 
        for key, value in self.iter_attributes(negative_properties):
 
            cache_parts.append(f'{key}={value}')
 
        if positive_properties is not None:
 
            cache_parts.append('')
 
            for key, value in self.iter_attributes(positive_properties):
 
                cache_parts.append(f'{key}={value}')
 
        cache_key = '\0'.join(cache_parts)
 
        try:
 
            style = self._currency_style_cache[cache_key]
 
        except KeyError:
 
            pos_style = self._build_currency_style(
 
                root, locale, code, 0, positive_properties, volatile=True,
 
            )
 
            curr_style = self._build_currency_style(
 
                root, locale, code, 1, negative_properties,
 
            )
 
            curr_style.addElement(odf.style.Map(
 
                condition='value()>=0', applystylename=pos_style,
 
            ))
 
            style = self.ensure_child(
 
                self.document.styles,
 
                odf.style.Style,
 
                name=f'{curr_style.getAttribute("name")}Cell',
 
                family='table-cell',
 
                datastylename=curr_style,
 
            )
 
            self._currency_style_cache[cache_key] = style
 
        return style
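    # Illustrative sketch (assumed usage; the cell helpers that attach this
    # style to currency cells, such as the balance_cell calls in accrual.py,
    # are defined elsewhere in this class):
    #
    #     usd_style = self.currency_style('USD')
    #     cell = odf.table.TableCell(valuetype='currency', value='9.99',
    #                                stylename=usd_style)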
 

	
 
    def _merge_style_iter_names(
 
            self,
 
            styles: Sequence[Union[str, odf.style.Style, None]],
 
    ) -> Iterator[str]:
 
        for source in styles:
 
            if source is None:
 
                continue
 
            elif not isinstance(source, str):
 
                source = source.getAttribute('name')
 
            if source.startswith('Merge_'):
 
                orig_names = iter(source.split('_'))
 
                next(orig_names)
 
                yield from orig_names
 
            else:
 
                yield source
 

	
 
    def _merge_styles(self,
 
                      new_style: odf.style.Style,
 
                      sources: Iterable[odf.style.Style],
 
    ) -> None:
 
        for elem in sources:
 
            for key, new_value in self.iter_attributes(elem):
 
                old_value = new_style.getAttribute(key)
 
                if (key == 'name'
 
                    or key == 'displayname'
 
                    or old_value == new_value):
 
                    pass
 
                elif old_value is None:
 
                    new_style.setAttribute(key, new_value)
 
                else:
 
                    raise ValueError(f"cannot merge styles with conflicting {key}")
 
            for child in elem.childNodes:
 
                new_style.addElement(self.copy_element(child))
 

	
 
    def merge_styles(self,
 
                     *styles: Union[str, odf.style.Style, None],
 
    ) -> Optional[odf.style.Style]:
 
        """Create a new style from multiple existing styles
 

	
 
        Given any number of existing styles, create a new style that combines
 
        all of those styles' attributes and properties, add it to the document
 
        styles, and return it.
 

	
 
        Styles can be specified by name, or by passing in their Style element.
 
        For convenience, you can also pass in None as an argument; None will
 
        simply be skipped.
 

	
 
        Results are cached. If you repeatedly call this method with the same
 
        arguments, you'll keep getting the same style returned, which will
 
        only be added to the document once.
 

	
 
        If you pass in zero real style arguments, returns None.
 
        If you pass in one style argument, returns that style unchanged.
 
        If you pass in a style that doesn't already exist in the document,
 
        or if you pass in styles that can't be merged (because they have
 
        conflicting attributes), raises ValueError.
 
        """
 
        name_map: Dict[str, odf.style.Style] = {}
 
        for name in self._merge_style_iter_names(styles):
 
            source = odf.style.Style(name=name)
 
            found = self.find_child(self.document.styles, source)
 
            if found is None:
 
                raise ValueError(f"no style named {name!r}")
 
            name_map[name] = found
 
        if not name_map:
 
            retval = None
 
        elif len(name_map) == 1:
 
            _, retval = name_map.popitem()
 
        else:
 
            new_name = f'Merge_{"_".join(sorted(name_map))}'
 
            retval = self.ensure_child(
 
                self.document.styles, odf.style.Style, name=new_name,
 
            )
 
            if retval.firstChild is None:
 
                self._merge_styles(retval, name_map.values())
 
        return retval
 

	
 
    ### Sheets
 

	
 
    def lock_first_row(self, sheet: Optional[odf.table.Table]=None) -> None:
 
        """Lock the first row of cells under the given sheet
 

	
 
        This method sets all the appropriate settings to "lock" the first row
 
        of cells in a sheet, so it stays in view even as the viewer scrolls
 
        through rows. If no sheet is given, this works on ``self.sheet``.
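
        A typical call order, sketched with hypothetical sheet and header
        names::

            self.use_sheet('Ledger')
            self.add_row(self.string_cell('Date'), self.string_cell('Amount'))
            self.lock_first_row()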
 
        """
 
        if sheet is None:
 
            sheet = self.sheet
 
        config_map = self.ensure_config_map_entry(
 
            self.view, 'Tables', sheet.getAttribute('name'),
 
        )
 
        self.set_config(config_map, 'PositionBottom', 1, 'int')
 
        self.set_config(config_map, 'VerticalSplitMode', 2, 'short')
 
        self.set_config(config_map, 'VerticalSplitPosition', 1, 'short')
 

	
 
    def use_sheet(self, name: str) -> odf.table.Table:
 
        """Switch the active sheet ``self.sheet`` to the one with the given name
 

	
 
        If there is no sheet with the given name, create it and append it to
 
        the spreadsheet first.
 

	
 
        If the current active sheet is empty when this method is called, it
 
        will be removed from the spreadsheet.
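
        For example, this sketch (with arbitrary sheet names) writes one row
        to each of two sheets, then switches back to the first::

            self.use_sheet('Income')
            self.add_row(self.string_cell('donation'))
            self.use_sheet('Expenses')
            self.add_row(self.string_cell('travel'))
            self.use_sheet('Income')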
 
        """
 
        try:
 
            empty_sheet = not self.sheet.hasChildNodes()
 
        except AttributeError:
 
            empty_sheet = False
 
        if empty_sheet:
 
            self.document.spreadsheet.removeChild(self.sheet)
 
        self.sheet = self.ensure_child(
 
            self.document.spreadsheet, odf.table.Table, name=name,
 
        )
 
        return self.sheet
 

	
 
    ### Initialization hooks
 

	
 
    def init_settings(self) -> None:
 
        """Hook called to initialize settings
 

	
 
        This method is called by __init__ to populate
 
        ``self.document.settings``. This implementation creates the barest
 
        skeleton structure necessary to support other methods, in particular
 
        ``lock_first_row``.
 
        """
 
        view_settings = self.ensure_child(
 
            self.document.settings, odf.config.ConfigItemSet, name='ooo:view-settings',
 
        )
 
        views = self.ensure_child(
 
            view_settings, odf.config.ConfigItemMapIndexed, name='Views',
 
        )
 
        self.view = self.ensure_child(views, odf.config.ConfigItemMapEntry)
 
        self.set_config(self.view, 'ViewId', 'view1')
 

	
 
    def init_styles(self) -> None:
 
        """Hook called to initialize settings
 

	
 
        This method is called by __init__ to populate
 
        ``self.document.styles``. This implementation creates basic building
 
        block cell styles often used in financial reports.
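
        Subclasses that need additional styles should usually extend this
        hook and call ``super().init_styles()`` first. A brief sketch, with a
        hypothetical attribute name::

            def init_styles(self) -> None:
                super().init_styles()
                self.style_wide_col = self.column_style(1.5)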
 
        """
 
        styles = self.document.styles
 
        self.style_bold = self.ensure_child(
 
            styles, odf.style.Style, name='Bold', family='table-cell',
 
        )
 
        self.ensure_child(
 
            self.style_bold, odf.style.TextProperties, fontweight='bold',
 
        )
 
        self.style_dividerline = self.ensure_child(
 
            styles, odf.style.Style, name='DividerLine', family='table-cell',
 
        )
 
        self.ensure_child(
 
            self.style_dividerline,
 
            odf.style.TableCellProperties,
 
            borderbottom='1pt solid #0000ff',
 
        )
 

	
 
        date_style = self.replace_child(styles, odf.number.DateStyle, name='ISODate')
 
        date_style.addElement(odf.number.Year(style='long'))
 
        date_style.addElement(odf.number.Text(text='-'))
 
        date_style.addElement(odf.number.Month(style='long'))
 
        date_style.addElement(odf.number.Text(text='-'))
 
        date_style.addElement(odf.number.Day(style='long'))
 
        self.style_date = self.ensure_child(
 
            styles,
 
            odf.style.Style,
 
            name=f'{date_style.getAttribute("name")}Cell',
 
            family='table-cell',
 
            datastylename=date_style,
 
        )
 

	
 
        self.style_starttext: odf.style.Style
 
        self.style_centertext: odf.style.Style
 
        self.style_endtext: odf.style.Style
 
        for textalign in ['start', 'center', 'end']:
 
            aligned_style = self.replace_child(
 
                styles, odf.style.Style, name=f'{textalign.title()}Text',
 
            )
 
            aligned_style.setAttribute('family', 'table-cell')
 
            aligned_style.addElement(odf.style.ParagraphProperties(textalign=textalign))
 
            setattr(self, f'style_{textalign}text', aligned_style)
 

	
 
        self.style_col1: odf.style.Style
 
        self.style_col1_25: odf.style.Style
 
        self.style_col1_5: odf.style.Style
 
        self.style_col1_75: odf.style.Style
 
        self.style_col2: odf.style.Style
 
        for width in ['1', '1.25', '1.5', '1.75', '2']:
 
            width_name = width.replace('.', '_')
 
            column_style = self.replace_child(
 
                self.document.automaticstyles, odf.style.Style, name=f'col_{width_name}',
 
            )
 
            column_style.setAttribute('family', 'table-column')
 
            column_style.addElement(odf.style.TableColumnProperties(columnwidth=f'{width}in'))
 
            setattr(self, f'style_col{width_name}', column_style)
 

	
 
    ### Rows and cells
 

	
 
    def add_row(self, *cells: odf.table.TableCell, **attrs: Any) -> odf.table.TableRow:
 
        row = odf.table.TableRow(**attrs)
 
        for cell in cells:
 
            row.addElement(cell)
 
        self.sheet.addElement(row)
 
        return row
 

	
 
    def balance_cell(self, balance: Balance, **attrs: Any) -> odf.table.TableCell:
 
        if balance.is_zero():
 
            return self.float_cell(0, **attrs)
 
        elif len(balance) == 1:
 
            amount = next(iter(balance.values()))
 
            attrs['stylename'] = self.merge_styles(
 
                attrs.get('stylename'), self.currency_style(amount.currency),
 
            )
 
            return self.currency_cell(amount, **attrs)
 
        else:
 
            lines = [babel.numbers.format_currency(
 
                number, currency, locale=self.locale, format_type=self.currency_fmt_key,
 
            ) for number, currency in balance.values()]
 
            attrs['stylename'] = self.merge_styles(
 
                attrs.get('stylename'), self.style_endtext,
 
            )
 
            return self.multiline_cell(lines, **attrs)
 

	
 
    def currency_cell(self, amount: data.Amount, **attrs: Any) -> odf.table.TableCell:
 
        if 'stylename' not in attrs:
 
            attrs['stylename'] = self.currency_style(amount.currency)
 
        number, currency = amount
 
        cell = odf.table.TableCell(valuetype='currency', value=number, **attrs)
 
        cell.addElement(odf.text.P(text=babel.numbers.format_currency(
 
            number, currency, locale=self.locale, format_type=self.currency_fmt_key,
 
        )))
 
        return cell
 

	
 
    def date_cell(self, date: datetime.date, **attrs: Any) -> odf.table.TableCell:
 
        attrs.setdefault('stylename', self.style_date)
 
        cell = odf.table.TableCell(valuetype='date', datevalue=date, **attrs)
 
        cell.addElement(odf.text.P(text=date.isoformat()))
 
        return cell
 

	
 
    def float_cell(self, value: Union[int, float, Decimal], **attrs: Any) -> odf.table.TableCell:
 
        cell = odf.table.TableCell(valuetype='float', value=value, **attrs)
 
        cell.addElement(odf.text.P(text=str(value)))
 
        return cell
 

	
 
    def _meta_link_pairs(self, links: Iterable[Optional[str]]) -> Iterator[Tuple[str, str]]:
 
        for href in links:
 
            if href is None:
 
                continue
 
            elif self.rt_wrapper is not None:
 
                rt_ids = self.rt_wrapper.parse(href)
 
                rt_href = rt_ids and self.rt_wrapper.url(*rt_ids)
 
            else:
 
                rt_ids = None
 
                rt_href = None
 
            if rt_ids is None or rt_href is None:
 
                # '..' pops the ODS filename off the link path. In other words,
 
                # make the link relative to the directory the ODS is in.
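                # For example, the metadata href 'Invoices/0123.pdf' becomes
                # the link '../Invoices/0123.pdf' with text '0123.pdf'.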
 
                href_path = Path('..', href)
 
                href = str(href_path)
 
                text = href_path.name
 
            else:
 
                rt_path = urlparse.urlparse(rt_href).path
 
                if rt_path.endswith('/Ticket/Display.html'):
 
                    text = rtutil.RT.unparse(*rt_ids)
 
                else:
 
                    text = urlparse.unquote(Path(rt_path).name)
 
                href = rt_href
 
            yield (href, text)
 

	
 
    def meta_links_cell(self, links: Iterable[Optional[str]], **attrs: Any) -> odf.table.TableCell:
 
        return self.multilink_cell(self._meta_link_pairs(links), **attrs)
 

	
 
    def multiline_cell(self, lines: Iterable[Any], **attrs: Any) -> odf.table.TableCell:
 
        cell = odf.table.TableCell(valuetype='string', **attrs)
 
        for line in lines:
 
            cell.addElement(odf.text.P(text=str(line)))
 
        return cell
 

	
 
    def multilink_cell(self, links: Iterable[LinkType], **attrs: Any) -> odf.table.TableCell:
 
        cell = odf.table.TableCell(valuetype='string', **attrs)
 
        for link in links:
 
            if isinstance(link, tuple):
 
                href, text = link
 
            else:
 
                href = link
 
                text = None
 
            cell.addElement(odf.text.P())
 
            cell.lastChild.addElement(odf.text.A(
 
                type='simple', href=href, text=text or href,
 
            ))
 
        return cell
 

	
 
    def string_cell(self, text: str, **attrs: Any) -> odf.table.TableCell:
 
        cell = odf.table.TableCell(valuetype='string', **attrs)
 
        cell.addElement(odf.text.P(text=text))
 
        return cell
 

	
 
    def write_row(self, row: RT) -> None:
 
        """Write a single row of input data to the spreadsheet
 

	
 
        This default implementation adds a single row to the spreadsheet,
 
        with one cell per element of the row. The type of each element
 
        determines what kind of cell is created.
 

	
 
        This implementation will help get you started, but you'll probably
 
        want to override it to specify styles.
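
        For example, a subclass might override it along these lines (the
        two-column layout is purely illustrative)::

            def write_row(self, row: RT) -> None:
                self.add_row(
                    self.string_cell(row[0], stylename=self.style_bold),
                    self.float_cell(row[1]),
                )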
 
        """
 
        out_row = odf.table.TableRow()
 
        for cell_source in row:
 
            if isinstance(cell_source, (int, float, Decimal)):
 
                cell = self.float_cell(cell_source)
 
            else:
 
                cell = self.string_cell(cell_source)
 
            out_row.addElement(cell)
 
        self.sheet.addElement(out_row)
 

	
 
    def save_file(self, out_file: BinaryIO) -> None:
 
        self.document.write(out_file)
 

	
 
    def save_path(self, path: Path, mode: str='w') -> None:
 
        with path.open(f'{mode}b') as out_file:
 
            out_file = cast(BinaryIO, out_file)
 
            self.save_file(out_file)
 

	
 

	
 
def normalize_amount_func(account_name: str) -> Callable[[T], T]:
 
    """Get a function to normalize amounts for reporting
 

	
 
    Given an account name, return a function that can be used on "amounts"
 
    under that account (including numbers, Amount objects, and Balance objects)
 
    to normalize them for reporting. Right now that means flipping the
 
    sign for accounts where "normal" postings are negative.
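
    For example (using an illustrative account name)::

        norm_func = normalize_amount_func('Income:Donations')
        norm_func(Decimal('-100'))   # returns Decimal('100')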
 
    """
 
    if account_name.startswith(('Assets:', 'Expenses:')):
 
        # We can't just return operator.pos because Beancount's Amount class
 
        # doesn't implement __pos__.
 
        return lambda amt: amt
 
    elif account_name.startswith(('Equity:', 'Income:', 'Liabilities:')):
 
        return operator.neg
 
    else:
 
        raise ValueError(f"unrecognized account name {account_name!r}")
conservancy_beancount/reports/ledger.py
Show inline comments
 
"""ledger.py - General ledger report from Beancount
 

	
 
This tool produces a spreadsheet that shows postings from your Beancount books, organized
 
by account.
 

	
 
Specify the date range you want to report with the ``--begin`` and ``--end``
 
options.
 

	
 
Select the accounts you want to report with the ``--account`` option. You can
 
specify this option multiple times. The report will include at least one sheet
 
for each account you specify. Subaccounts will be reported on that sheet as
 
well.
 

	
 
Select the postings you want to report by passing metadata search terms in
 
``name=value`` format.
 

	
 
Run ``ledger-report --help`` for abbreviations and other options.
 

	
 
Examples
 
--------
 

	
 
Report all activity related to a given project::
 

	
 
    ledger-report project=NAME
 

	
 
Get all Assets postings for a given month to help with reconciliation::
 

	
 
    ledger-report -a Assets -b 2018-05-01 -e 2018-06-01
 
"""
 
# Copyright © 2020  Brett Smith
 
#
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU Affero General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU Affero General Public License for more details.
 
#
 
# You should have received a copy of the GNU Affero General Public License
 
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
 

	
 
import argparse
 
import collections
 
import datetime
 
import enum
 
import itertools
 
import operator
 
import logging
 
import sys
 

	
 
from typing import (
 
    Callable,
 
    Dict,
 
    Iterable,
 
    Iterator,
 
    List,
 
    Mapping,
 
    Optional,
 
    Sequence,
 
    TextIO,
 
    Tuple,
 
    Union,
 
)
 

	
 
from pathlib import Path
 

	
 
import odf.table  # type:ignore[import]
 

	
 
from beancount.parser import printer as bc_printer
 

	
 
from . import core
 
from .. import books
 
from .. import cliutil
 
from .. import config as configmod
 
from .. import data
 
from .. import ranges
 
from .. import rtutil
 

	
 
PostTally = List[Tuple[int, data.Account]]
 

	
 
PROGNAME = 'ledger-report'
 
logger = logging.getLogger('conservancy_beancount.reports.ledger')
 

	
 
class LedgerODS(core.BaseODS[data.Posting, data.Account]):
 
    CORE_COLUMNS: Sequence[str] = [
 
        'Date',
 
        data.Metadata.human_name('entity'),
 
        'Description',
 
        'Original Amount',
 
        'Booked Amount',
 
    ]
 
    ACCOUNT_COLUMNS: Dict[str, Sequence[str]] = collections.OrderedDict([
 
        ('Income', ['project', 'rt-id', 'receipt', 'income-type']),
 
        ('Expenses', ['project', 'rt-id', 'receipt', 'approval', 'expense-allocation']),
 
        ('Equity', ['rt-id']),
 
        ('Assets:Receivable', ['project', 'rt-id', 'invoice', 'approval', 'contract', 'purchase-order']),
 
        ('Liabilities:Payable', ['project', 'rt-id', 'invoice', 'approval', 'contract', 'purchase-order']),
 
        ('Assets:PayPal', ['rt-id', 'paypal-id', 'receipt', 'approval']),
 
        ('Assets', ['rt-id', 'receipt', 'approval', 'bank-statement']),
 
        ('Liabilities', ['rt-id', 'receipt', 'approval', 'bank-statement']),
 
    ])
 
    COLUMN_STYLES: Mapping[str, str] = {
 
        'Date': '',
 
        'Description': 'col_1_75',
 
        data.Metadata.human_name('paypal-id'): 'col_1_5',
 
    }
 
    # Excel 2003 was limited to 65,536 rows per worksheet.
 
    # While we can probably count on all our users supporting more modern
 
    # formats (Excel 2007 supports over 1 million rows per worksheet),
 
    # keeping the default limit conservative seems good to avoid running into
 
    # other limits (like the number of hyperlinks per worksheet), plus just
 
    # better for human organization and readability.
 
    SHEET_SIZE = 65000
 

	
 
    def __init__(self,
 
                 start_date: datetime.date,
 
                 stop_date: datetime.date,
 
                 sheet_names: Optional[Sequence[str]]=None,
 
                 rt_wrapper: Optional[rtutil.RT]=None,
 
                 sheet_size: Optional[int]=None,
 
    ) -> None:
 
        if sheet_names is None:
 
            sheet_names = list(self.ACCOUNT_COLUMNS)
 
        if sheet_size is None:
 
            sheet_size = self.SHEET_SIZE
 
        super().__init__(rt_wrapper)
 
        self.date_range = ranges.DateRange(start_date, stop_date)
 
        self.required_sheet_names = sheet_names
 
        self.sheet_size = sheet_size
 

	
 
    def init_styles(self) -> None:
 
        super().init_styles()
 
        self.amount_column = self.column_style(1.2)
 
        self.default_column = self.column_style(1.5)
 
        self.column_styles: Mapping[str, Union[str, odf.style.Style]] = {
 
            'Date': '',
 
            'Description': self.column_style(2),
 
            'Original Amount': self.amount_column,
 
            'Booked Amount': self.amount_column,
 
            data.Metadata.human_name('project'): self.amount_column,
 
            data.Metadata.human_name('rt-id'): self.amount_column,
 
        }
 

	
 
    @classmethod
 
    def _group_tally(
 
            cls,
 
            tally_by_account: PostTally,
 
            key: Callable[[data.Account], Optional[str]],
 
    ) -> Dict[str, PostTally]:
 
        retval: Dict[str, PostTally] = collections.defaultdict(list)
 
        for count, account in tally_by_account:
 
            item_key = key(account)
 
            if item_key is not None:
 
                retval[item_key].append((count, account))
 
        return retval
 

	
 
    @classmethod
 
    def _split_sheet(
 
            cls,
 
            tally_by_account: PostTally,
 
            sheet_size: int,
 
            sheet_name: str,
 
    ) -> Iterator[str]:
 
        total = 0
 
        for index, (count, account) in enumerate(tally_by_account):
 
            total += count
 
            if total > sheet_size:
 
                break
 
        else:
 
            # All the accounts fit in this sheet.
 
            yield sheet_name
 
            return
 
        if index == 0 and len(tally_by_account) == 1:
 
            # With one account, we can't split any further, so warn and stop.
 
            logger.warning(
 
                "%s has %s rows, over size %s",
 
                account, f'{count:,g}', f'{sheet_size:,g}',
 
            )
 
            yield sheet_name
 
            return
 
        group_func = operator.methodcaller('root_part', sheet_name.count(':') + 2)
 
        maybe_split = cls._group_tally(tally_by_account[:index], group_func)
 
        must_split = cls._group_tally(tally_by_account[index:], group_func)
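        # Illustrative example with hypothetical accounts: splitting
        # 'Expenses' with sheet_size=3 and tallies
        # {Expenses:Tax: 2, Expenses:Travel: 2} yields the sheet names
        # 'Expenses:Travel' and 'Expenses'; Expenses:Tax stays on the parent
        # 'Expenses' sheet because it still fits there.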
 
        for subkey, must_split_tally in sorted(must_split.items()):
 
            split_names = cls._split_sheet(
 
                maybe_split.get(subkey, []) + must_split_tally, sheet_size, subkey,
 
            )
 
            # We must be willing to split out at least as many sheets as there
 
            # are accounts that didn't fit. Do that first.
 
            yield from itertools.islice(split_names, len(must_split_tally))
 
            # After that, we can be in one of two cases:
 
            # 1. There is no next sheet. All the accounts, including the
 
            #    maybe_splits and must_splits, fit on planned subsheets.
 
            #    Update state to note we don't need a sheet for them anymore.
 
            # 2. The next sheet is named `subkey`, and is planned to include
 
            #    all of our maybe_split accounts. However, we don't need to
 
            #    yield that sheet name, because those accounts already fit in
 
            #    the sheet we're planning, and it would be a needless split.
 
            next_sheet_name = next(split_names, None)
 
            if next_sheet_name is None:
 
                maybe_split.pop(subkey, None)
 
            else:
 
                assert next_sheet_name == subkey
 
                assert not any(split_names)
 
        if maybe_split:
 
            yield sheet_name
 

	
 
    @classmethod
 
    def plan_sheets(
 
            cls,
 
            tally_by_account: Mapping[data.Account, int],
 
            base_sheets: Sequence[str],
 
            sheet_size: int,
 
    ) -> Sequence[str]:
 
        sorted_tally: PostTally = [
 
            (count, account)
 
            for account, count in tally_by_account.items()
 
        ]
 
        sorted_tally.sort()
 
        split_tally = cls._group_tally(
 
            sorted_tally,
 
            operator.methodcaller('is_under', *base_sheets),
 
        )
 
        return [
 
            sheet_name
 
            for key in base_sheets
 
            for sheet_name in cls._split_sheet(split_tally[key], sheet_size, key)
 
        ]
 

	
 
    @staticmethod
 
    def _sort_and_filter_accounts(
 
            accounts: Iterable[data.Account],
 
            order: Sequence[str],
 
    ) -> Iterator[Tuple[int, data.Account]]:
 
        index_map = {s: ii for ii, s in enumerate(order)}
 
        retval: Mapping[int, List[data.Account]] = collections.defaultdict(list)
 
        for account in accounts:
 
            acct_key = account.is_under(*order)
 
            if acct_key is not None:
 
                retval[index_map[acct_key]].append(account)
 
        for key in sorted(retval):
 
            acct_list = retval[key]
 
            acct_list.sort()
 
            for account in acct_list:
 
                yield key, account
 

	
 
    def section_key(self, row: data.Posting) -> data.Account:
 
        return row.account
 

	
 
    def start_sheet(self, sheet_name: str) -> None:
 
        self.use_sheet(sheet_name.replace(':', ' '))
 
        columns_key = data.Account(sheet_name).is_under(*self.ACCOUNT_COLUMNS)
 
        # columns_key must not be None because ACCOUNT_COLUMNS has an entry
 
        # for all five root accounts.
 
        assert columns_key is not None
 
        self.metadata_columns = self.ACCOUNT_COLUMNS[columns_key]
 
        self.sheet_columns: Sequence[str] = [
 
            *self.CORE_COLUMNS,
 
            *(data.Metadata.human_name(meta_key) for meta_key in self.metadata_columns),
 
        ]
 
        for col_name in self.sheet_columns:
 
            self.sheet.addElement(odf.table.TableColumn(
 
                stylename=self.COLUMN_STYLES.get(col_name, 'col_1_25'),
 
                stylename=self.column_styles.get(col_name, self.default_column),
 
            ))
 
        self.add_row(*(
 
            self.string_cell(col_name, stylename=self.style_bold)
 
            for col_name in self.sheet_columns
 
        ))
 
        self.lock_first_row()
 

	
 
    def _report_section_balance(self, key: data.Account, date_key: str) -> None:
 
        uses_opening = key.is_under('Assets', 'Equity', 'Liabilities')
 
        if date_key == 'start':
 
            if not uses_opening:
 
                return
 
            date = self.date_range.start
 
            description = "Opening Balance"
 
        else:
 
            date = self.date_range.stop
 
            description = "Ending Balance" if uses_opening else "Period Total"
 
        balance = self.norm_func(
 
            self.account_groups[key].balance_at_cost_by_date(date)
 
        )
 
        self.add_row(
 
            self.date_cell(date, stylename=self.merge_styles(
 
                self.style_bold, self.style_date,
 
            )),
 
            odf.table.TableCell(),
 
            self.string_cell(description, stylename=self.style_bold),
 
            odf.table.TableCell(),
 
            self.balance_cell(balance, stylename=self.style_bold),
 
        )
 

	
 
    def start_section(self, key: data.Account) -> None:
 
        self.add_row()
 
        self.add_row(
 
            odf.table.TableCell(),
 
            self.string_cell(
 
                f"{key} Ledger"
 
                f" From {self.date_range.start.isoformat()}"
 
                f" To {self.date_range.stop.isoformat()}",
 
                stylename=self.style_bold,
 
                numbercolumnsspanned=len(self.sheet_columns) - 1,
 
            ),
 
        )
 
        self.norm_func = core.normalize_amount_func(key)
 
        self._report_section_balance(key, 'start')
 

	
 
    def end_section(self, key: data.Account) -> None:
 
        self._report_section_balance(key, 'stop')
 

	
 
    def write_row(self, row: data.Posting) -> None:
 
        if row.meta.date not in self.date_range:
 
            return
 
        elif row.cost is None:
 
            amount_cell = odf.table.TableCell()
 
        else:
 
            amount_cell = self.currency_cell(self.norm_func(row.units))
 
        self.add_row(
 
            self.date_cell(row.meta.date),
 
            self.string_cell(row.meta.get('entity') or ''),
 
            self.string_cell(row.meta.txn.narration),
 
            amount_cell,
 
            self.currency_cell(self.norm_func(row.at_cost())),
 
            *(self.meta_links_cell(row.meta.report_links(key))
 
              if key in data.LINK_METADATA
 
              else self.string_cell(row.meta.get(key, ''))
 
              for key in self.metadata_columns),
 
        )
 

	
 
    def _combined_balance_row(self,
 
                              date: datetime.date,
 
                              balance_accounts: Sequence[str],
 
    ) -> None:
 
        balance = -sum((
 
            related.balance_at_cost_by_date(date)
 
            for account, related in self.account_groups.items()
 
            if account.is_under(*balance_accounts)
 
        ), core.MutableBalance())
 
        self.add_row(
 
            self.string_cell(
 
                f"Balance as of {date.isoformat()}",
 
                stylename=self.merge_styles(self.style_bold, self.style_endtext),
 
            ),
 
            self.balance_cell(balance, stylename=self.style_bold),
 
        )
 

	
 
    def write_balance_sheet(self) -> None:
 
        balance_accounts = ['Equity', 'Income', 'Expenses']
 
        # FIXME: This is a hack to exclude non-project Equity accounts from
 
        # project reports.
 
        if balance_accounts[0] not in self.required_sheet_names:
 
            balance_accounts[0] = 'Equity:Funds'
 
        self.use_sheet("Balance")
 
        column_style = self.replace_child(
 
            self.document.automaticstyles, odf.style.Style, name='col_3',
 
        )
 
        column_style.setAttribute('family', 'table-column')
 
        column_style.addElement(odf.style.TableColumnProperties(columnwidth='3in'))
 
        for _ in range(2):
 
            self.sheet.addElement(odf.table.TableColumn(stylename=column_style))
 
        self.sheet.addElement(odf.table.TableColumn(stylename=self.column_style(3)))
 
        self.sheet.addElement(odf.table.TableColumn(stylename=self.column_style(1.5)))
 
        self.add_row(
 
            self.string_cell("Account", stylename=self.style_bold),
 
            self.string_cell("Balance", stylename=self.style_bold),
 
        )
 
        self.lock_first_row()
 
        self.add_row()
 
        self.add_row(self.string_cell(
 
            f"Ledger From {self.date_range.start.isoformat()}"
 
            f" To {self.date_range.stop.isoformat()}",
 
            stylename=self.merge_styles(self.style_centertext, self.style_bold),
 
            numbercolumnsspanned=2,
 
        ))
 
        self.add_row()
 
        self._combined_balance_row(self.date_range.start, balance_accounts)
 
        for _, account in self._sort_and_filter_accounts(
 
                self.account_groups, balance_accounts,
 
        ):
 
            related = self.account_groups[account]
 
            # start_bal - stop_bal == -(stop_bal - start_bal)
 
            balance = related.balance_at_cost_by_date(self.date_range.start)
 
            balance -= related.balance_at_cost_by_date(self.date_range.stop)
 
            if not balance.is_zero():
 
                self.add_row(
 
                    self.string_cell(account, stylename=self.style_endtext),
 
                    self.balance_cell(balance),
 
                )
 
        self._combined_balance_row(self.date_range.stop, balance_accounts)
 

	
 
    def write(self, rows: Iterable[data.Posting]) -> None:
 
        self.account_groups = dict(core.RelatedPostings.group_by_account(rows))
 
        self.write_balance_sheet()
 
        tally_by_account_iter = (
 
            (account, sum(1 for post in related if post.meta.date in self.date_range))
 
            for account, related in self.account_groups.items()
 
        )
 
        tally_by_account = {
 
            account: count
 
            for account, count in tally_by_account_iter
 
            if count
 
        }
 
        sheet_names = self.plan_sheets(
 
            tally_by_account, self.required_sheet_names, self.sheet_size,
 
        )
 
        using_sheet_index = -1
 
        for sheet_index, account in self._sort_and_filter_accounts(
 
                tally_by_account, sheet_names,
 
        ):
 
            while using_sheet_index < sheet_index:
 
                using_sheet_index += 1
 
                self.start_sheet(sheet_names[using_sheet_index])
 
            super().write(self.account_groups[account])
 
        for index in range(using_sheet_index + 1, len(sheet_names)):
 
            self.start_sheet(sheet_names[index])
 

	
 

	
 
class ReturnFlag(enum.IntFlag):
 
    LOAD_ERRORS = 1
 
    NOTHING_TO_REPORT = 8
 

	
 

	
 
def parse_arguments(arglist: Optional[Sequence[str]]=None) -> argparse.Namespace:
 
    parser = argparse.ArgumentParser(prog=PROGNAME)
 
    cliutil.add_version_argument(parser)
 
    parser.add_argument(
 
        '--begin', '--start', '-b',
 
        dest='start_date',
 
        metavar='DATE',
 
        type=cliutil.date_arg,
 
        help="""Date to start reporting entries, inclusive, in YYYY-MM-DD format.
 
The default is one year ago.
 
""")
 
    parser.add_argument(
 
        '--end', '--stop', '-e',
 
        dest='stop_date',
 
        metavar='DATE',
 
        type=cliutil.date_arg,
 
        help="""Date to stop reporting entries, exclusive, in YYYY-MM-DD format.
 
The default is a year after the start date, or 30 days from today if the start
 
date was also not specified.
 
""")
 
    parser.add_argument(
 
        '--account', '-a',
 
        dest='sheet_names',
 
        metavar='ACCOUNT',
 
        action='append',
 
        help="""Show this account in the report. You can specify this option
 
multiple times. If not specified, the default set adapts to your search
 
criteria.
 
""")
 
    parser.add_argument(
 
        '--sheet-size', '--size',
 
        metavar='SIZE',
 
        type=int,
 
        default=LedgerODS.SHEET_SIZE,
 
        help="""Try to limit sheets to this many rows. The report will
 
automatically create new sheets to make this happen. When that's not possible,
 
it will issue a warning.
 
""")
 
    parser.add_argument(
 
        '--output-file', '-O',
 
        metavar='PATH',
 
        type=Path,
 
        help="""Write the report to this file, or stdout when PATH is `-`.
 
The default is a generated filename that includes the report dates.
 
""")
 
    cliutil.add_loglevel_argument(parser)
 
    parser.add_argument(
 
        'search_terms',
 
        metavar='FILTER',
 
        type=cliutil.SearchTerm.arg_parser('project', 'rt-id'),
 
        nargs=argparse.ZERO_OR_MORE,
 
        help="""Report on postings that match this criteria. The format is
 
NAME=TERM. TERM is a link or word that must exist in a posting's NAME
 
metadata to match. A single ticket number is a shortcut for
 
`rt-id=rt:NUMBER`. Any other word is a shortcut for `project=TERM`.
 
""")
 
    args = parser.parse_args(arglist)
 
    if args.sheet_names is None:
 
        if any(term.meta_key == 'project' for term in args.search_terms):
 
            args.sheet_names = ['Income', 'Expenses', 'Assets:Receivable', 'Liabilities:Payable']
 
        else:
 
            args.sheet_names = list(LedgerODS.ACCOUNT_COLUMNS)
 
    return args
 

	
 
def diff_year(date: datetime.date, diff: int) -> datetime.date:
 
    new_year = date.year + diff
 
    try:
 
        return date.replace(year=new_year)
 
    except ValueError:
 
        # The original date is Feb 29, which doesn't exist in the new year.
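        # For example, diff_year(date(2020, 2, 29), -1) returns 2019-02-28,
        # while diff_year(date(2020, 2, 29), 1) returns 2021-03-01.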
 
        if diff < 0:
 
            return datetime.date(new_year, 2, 28)
 
        else:
 
            return datetime.date(new_year, 3, 1)
 

	
 
def main(arglist: Optional[Sequence[str]]=None,
 
         stdout: TextIO=sys.stdout,
 
         stderr: TextIO=sys.stderr,
 
         config: Optional[configmod.Config]=None,
 
) -> int:
 
    args = parse_arguments(arglist)
 
    cliutil.set_loglevel(logger, args.loglevel)
 
    if config is None:
 
        config = configmod.Config()
 
        config.load_file()
 

	
 
    today = datetime.date.today()
 
    if args.start_date is None:
 
        args.start_date = diff_year(today, -1)
 
        if args.stop_date is None:
 
            args.stop_date = today + datetime.timedelta(days=30)
 
    elif args.stop_date is None:
 
        args.stop_date = diff_year(args.start_date, 1)
 

	
 
    returncode = 0
 
    books_loader = config.books_loader()
 
    if books_loader is None:
 
        entries, load_errors, _ = books.Loader.load_none(config.config_file_path())
 
    else:
 
        entries, load_errors, _ = books_loader.load_fy_range(args.start_date, args.stop_date)
 
    for error in load_errors:
 
        bc_printer.print_error(error, file=stderr)
 
        returncode |= ReturnFlag.LOAD_ERRORS
 

	
 
    postings = data.Posting.from_entries(entries)
 
    for search_term in args.search_terms:
 
        postings = search_term.filter_postings(postings)
 

	
 
    rt_wrapper = config.rt_wrapper()
 
    if rt_wrapper is None:
 
        logger.warning("could not initialize RT client; spreadsheet links will be broken")
 
    report = LedgerODS(
 
        args.start_date,
 
        args.stop_date,
 
        args.sheet_names,
 
        rt_wrapper,
 
        args.sheet_size,
 
    )
 
    report.write(postings)
 
    if not report.account_groups:
 
        logger.warning("no matching postings found to report")
 
        returncode |= ReturnFlag.NOTHING_TO_REPORT
 

	
 
    if args.output_file is None:
 
        out_dir_path = config.repository_path() or Path()
 
        args.output_file = out_dir_path / 'LedgerReport_{}_{}.ods'.format(
 
            args.start_date.isoformat(), args.stop_date.isoformat(),
 
        )
 
        logger.info("Writing report to %s", args.output_file)
 
    ods_file = cliutil.bytes_output(args.output_file, stdout)
 
    report.save_file(ods_file)
 
    return 0 if returncode == 0 else 16 + returncode
 

	
 
entry_point = cliutil.make_entry_point(__name__, PROGNAME)
 

	
 
if __name__ == '__main__':
 
    exit(entry_point())
setup.py
Show inline comments
 
#!/usr/bin/env python3
 

	
 
from setuptools import setup
 

	
 
setup(
 
    name='conservancy_beancount',
 
    description="Plugin, library, and reports for reading Conservancy's books",
 
    version='1.2.3',
 
    version='1.2.4',
 
    author='Software Freedom Conservancy',
 
    author_email='info@sfconservancy.org',
 
    license='GNU AGPLv3+',
 

	
 
    install_requires=[
 
        'babel>=2.6',  # Debian:python3-babel
 
        'beancount>=2.2',  # Debian:beancount
 
        # 1.4.1 crashes when trying to save some documents.
 
        'odfpy>=1.4.0,!=1.4.1',  # Debian:python3-odf
 
        'PyYAML>=3.0',  # Debian:python3-yaml
 
        'regex',  # Debian:python3-regex
 
        'rt>=2.0',
 
    ],
 
    setup_requires=[
 
        'pytest-mypy',
 
        'pytest-runner',  # Debian:python3-pytest-runner
 
    ],
 
    tests_require=[
 
        'mypy>=0.770',  # Debian:python3-mypy
 
        'pytest',  # Debian:python3-pytest
 
    ],
 

	
 
    packages=[
 
        'conservancy_beancount',
 
        'conservancy_beancount.plugin',
 
        'conservancy_beancount.reports',
 
    ],
 
    entry_points={
 
        'console_scripts': [
 
            'accrual-report = conservancy_beancount.reports.accrual:entry_point',
 
            'ledger-report = conservancy_beancount.reports.ledger:entry_point',
 
        ],
 
    },
 
)
tests/test_reports_spreadsheet.py
Show inline comments
 
"""test_reports_spreadsheet - Unit tests for spreadsheet classes"""
 
# Copyright © 2020  Brett Smith
 
#
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU Affero General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU Affero General Public License for more details.
 
#
 
# You should have received a copy of the GNU Affero General Public License
 
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
 

	
 
import datetime
 
import io
 
import itertools
 

	
 
import pytest
 

	
 
import babel.core
 
import babel.numbers
 
import odf.config
 
import odf.number
 
import odf.style
 
import odf.table
 
import odf.text
 

	
 
from decimal import Decimal
 

	
 
from . import testutil
 

	
 
from conservancy_beancount import rtutil
 
from conservancy_beancount.reports import core
 

	
 
EN_US = babel.core.Locale('en', 'US')
 

	
 
XML_NAMES_LIST = [None, 'ce2', 'xml_testname']
 
XML_NAMES = itertools.cycle(XML_NAMES_LIST)
 

	
 
CURRENCY_CELL_DATA = [
 
    (Decimal('10.101010'), 'BRL'),
 
    (Decimal('-50.50'), 'GBP'),
 
]
 

	
 
LINK_CELL_DATA = [
 
    'https://example.org',
 
    ('https://example.net', None),
 
    ('https://example.com', 'Example Site'),
 
]
 

	
 
NUMERIC_CELL_DATA = [
 
    42,
 
    42.42,
 
    Decimal('42.42'),
 
]
 

	
 
STRING_CELL_DATA = [
 
    'Example String',
 
    LINK_CELL_DATA[0],
 
]
 

	
 
class BaseTester(core.BaseSpreadsheet[tuple, str]):
 
    def __init__(self):
 
        self.start_call = None
 
        self.end_call = None
 
        self.started_sections = []
 
        self.ended_sections = []
 
        self.written_rows = []
 

	
 
    def section_key(self, row):
 
        return row[0]
 

	
 
    def start_spreadsheet(self):
 
        self.start_call = self.started_sections.copy()
 

	
 
    def start_section(self, key):
 
        self.started_sections.append(key)
 

	
 
    def end_section(self, key):
 
        self.ended_sections.append(key)
 

	
 
    def end_spreadsheet(self):
 
        self.end_call = self.ended_sections.copy()
 

	
 
    def write_row(self, key):
 
        self.written_rows.append(key)
 

	
 

	
 
class ODSTester(core.BaseODS[tuple, str]):
 
    def section_key(self, row):
 
        return row[0]
 

	
 

	
 
@pytest.fixture
 
def spreadsheet():
 
    return BaseTester()
 

	
 
@pytest.fixture
 
def ods_writer():
 
    retval = ODSTester()
 
    retval.locale = EN_US
 
    return retval
 

	
 
def get_children(parent, child_type, **kwargs):
 
    return [elem for elem in parent.getElementsByType(child_type)
 
            if all(elem.getAttribute(k) == v for k, v in kwargs.items())]
 

	
 
def get_child(parent, child_type, index=-1, **kwargs):
 
    try:
 
        return get_children(parent, child_type, **kwargs)[index]
 
    except IndexError:
 
        raise ValueError("no matching child found") from None
 

	
 
def iter_text(parent):
 
    for child in parent.childNodes:
 
        if isinstance(child, odf.element.Text):
 
            yield child.data
 
        else:
 
            yield from iter_text(child)
 

	
 
def get_text(parent, joiner=''):
 
    return joiner.join(iter_text(parent))
 

	
 
def check_currency_style(curr_style):
 
    child_names = {child.tagName for child in curr_style.childNodes}
 
    assert odf.number.Number().tagName in child_names
 
    assert len(child_names) > 1
 

	
 
def test_spreadsheet(spreadsheet):
 
    rows = [(ch, ii) for ii, ch in enumerate('aabbcc', 1)]
 
    spreadsheet.write(iter(rows))
 
    assert spreadsheet.written_rows == rows
 
    assert spreadsheet.ended_sections == spreadsheet.started_sections
 
    assert spreadsheet.started_sections == list('abc')
 
    assert spreadsheet.start_call == []
 
    assert spreadsheet.end_call == spreadsheet.ended_sections
 

	
 
def test_empty_spreadsheet(spreadsheet):
 
    empty_list = []
 
    spreadsheet.write(iter(empty_list))
 
    assert spreadsheet.start_call == empty_list
 
    assert spreadsheet.end_call == empty_list
 
    assert spreadsheet.started_sections == empty_list
 
    assert spreadsheet.ended_sections == empty_list
 
    assert spreadsheet.written_rows == empty_list
 

	
 
def test_one_section_spreadsheet(spreadsheet):
 
    rows = [('A', n) for n in range(1, 4)]
 
    spreadsheet.write(iter(rows))
 
    assert spreadsheet.written_rows == rows
 
    assert spreadsheet.ended_sections == spreadsheet.started_sections
 
    assert spreadsheet.started_sections == list('A')
 
    assert spreadsheet.start_call == []
 
    assert spreadsheet.end_call == spreadsheet.ended_sections
 

	
 
def test_ods_writer(ods_writer):
 
    rows = [(ch, ii) for ii, ch in enumerate('aabbcc', 1)]
 
    ods_writer.write(iter(rows))
 
    sheets = ods_writer.document.getElementsByType(odf.table.Table)
 
    assert len(sheets) == 1
 
    for exp_row, act_row in zip(rows, testutil.ODSCell.from_sheet(sheets[0])):
 
        expected1, expected2 = exp_row
 
        actual1, actual2 = act_row
 
        assert actual1.value_type == 'string'
 
        assert actual1.text == expected1
 
        assert actual2.value_type == 'float'
 
        assert actual2.value == expected2
 
        assert actual2.text == str(expected2)
 

	
 
@pytest.mark.parametrize('save_type', ['file', 'path'])
 
def test_ods_writer_save(tmp_path, save_type):
 
    rows = list(zip('ABC', 'abc'))
 
    ods_writer = ODSTester()
 
    ods_writer.write(iter(rows))
 
    if save_type == 'file':
 
        ods_output = io.BytesIO()
 
        ods_writer.save_file(ods_output)
 
        ods_output.seek(0)
 
    else:
 
        ods_output = tmp_path / 'SavePathTest.ods'
 
        ods_writer.save_path(ods_output)
 
    for exp_row, act_row in zip(rows, testutil.ODSCell.from_ods_file(ods_output)):
 
        assert len(exp_row) == len(act_row)
 
        for expected, actual in zip(exp_row, act_row):
 
            assert actual.value_type == 'string'
 
            assert actual.value is None
 
            assert actual.text == expected
 

	
 
def test_ods_writer_use_sheet(ods_writer):
 
    names = ['One', 'Two']
 
    for name in names:
 
        ods_writer.use_sheet(name)
 
        ods_writer.write([(name,)])
 
    ods_writer.use_sheet('End')
 
    sheets = ods_writer.document.getElementsByType(odf.table.Table)
 
    assert len(sheets) == len(names) + 1
 
    for name, sheet in zip(names, sheets):
 
        texts = [cell.text for row in testutil.ODSCell.from_sheet(sheet)
 
                 for cell in row]
 
        assert texts == [name]
 

	
 
def test_ods_writer_use_sheet_returns_to_prior_sheets(ods_writer):
 
    names = ['One', 'Two']
 
    sheets = []
 
    for name in names:
 
        sheets.append(ods_writer.use_sheet(name))
 
        ods_writer.write([(name,)])
 
    for name, expected in zip(names, sheets):
 
        actual = ods_writer.use_sheet(name)
 
        assert actual is expected
 
        texts = [cell.text for row in testutil.ODSCell.from_sheet(actual)
 
                 for cell in row]
 
        assert texts == [name]
 

	
 
def test_ods_writer_use_sheet_discards_unused_sheets(ods_writer):
 
    ods_writer.use_sheet('Three')
 
    ods_writer.use_sheet('Two')
 
    ods_writer.use_sheet('One')
 
    sheets = ods_writer.document.getElementsByType(odf.table.Table)
 
    assert len(sheets) == 1
 
    assert sheets[0].getAttribute('name') == 'One'
 

	
 
@pytest.mark.parametrize('width,expect_name', [
 
    ('.750', 'col_0_75in'),
 
    (2, 'col_2in'),
 
    ('2.2in', 'col_2_2in'),
 
    (3.5, 'col_3_5in'),
 
    ('4cm', 'col_4cm'),
 
])
 
def test_ods_column_style(ods_writer, width, expect_name):
 
    style = ods_writer.column_style(width)
 
    assert style.getAttribute('name') == expect_name
 
    assert style.getAttribute('family') == 'table-column'
 
    curr_style = get_child(
 
        ods_writer.document.automaticstyles,
 
        odf.style.Style,
 
        name=expect_name,
 
    )
 
    assert get_child(
 
        curr_style,
 
        odf.style.TableColumnProperties,
 
        columnwidth=expect_name[4:].replace('_', '.'),
 
    )
 

	
 
def test_ods_column_style_caches(ods_writer):
 
    int_width = ods_writer.column_style('1in')
 
    float_width = ods_writer.column_style('1.00in')
 
    assert int_width is float_width
 

	
 
@pytest.mark.parametrize('width', [
 
    '1mi',
 
    '0in',
 
    '-1cm',
 
    'in',
 
    '.cm',
 
])
 
def test_ods_column_style_invalid_width(ods_writer, width):
 
    with pytest.raises(ValueError):
 
        ods_writer.column_style(width)
 

	
 
@pytest.mark.parametrize('currency_code', [
 
    'USD',
 
    'EUR',
 
    'BRL',
 
])
 
def test_ods_currency_style(ods_writer, currency_code):
 
    style = ods_writer.currency_style(currency_code)
 
    assert style.getAttribute('family') == 'table-cell'
 
    curr_style = get_child(
 
        ods_writer.document.styles,
 
        odf.number.CurrencyStyle,
 
        name=style.getAttribute('datastylename'),
 
    )
 
    check_currency_style(curr_style)
 
    mappings = get_children(curr_style, odf.style.Map)
 
    assert mappings
 
    for mapping in mappings:
 
        check_currency_style(get_child(
 
            ods_writer.document.styles,
 
            odf.number.CurrencyStyle,
 
            name=mapping.getAttribute('applystylename'),
 
        ))
 

	
 
def test_ods_currency_style_caches(ods_writer):
 
    expected = ods_writer.currency_style('USD')
 
    _ = ods_writer.currency_style('EUR')
 
    actual = ods_writer.currency_style('USD')
 
    assert actual is expected
 

	
 
def test_ods_currency_style_cache_considers_properties(ods_writer):
 
    bold_text = odf.style.TextProperties(fontweight='bold')
 
    plain = ods_writer.currency_style('USD')
 
    bold = ods_writer.currency_style('USD', positive_properties=bold_text)
 
    assert plain is not bold
 
    assert plain.getAttribute('name') != bold.getAttribute('name')
 
    assert plain.getAttribute('datastylename') != bold.getAttribute('datastylename')
 

	
 
@pytest.mark.parametrize('attr_name,child_type,checked_attr', [
 
    ('style_col1', odf.style.TableColumnProperties, 'columnwidth'),
 
    ('style_col1_25', odf.style.TableColumnProperties, 'columnwidth'),
 
    ('style_col1_5', odf.style.TableColumnProperties, 'columnwidth'),
 
    ('style_col1_75', odf.style.TableColumnProperties, 'columnwidth'),
 
    ('style_col2', odf.style.TableColumnProperties, 'columnwidth'),
 
    ('style_bold', odf.style.TextProperties, 'fontweight'),
 
    ('style_centertext', odf.style.ParagraphProperties, 'textalign'),
 
    ('style_dividerline', odf.style.TableCellProperties, 'borderbottom'),
 
    ('style_endtext', odf.style.ParagraphProperties, 'textalign'),
 
    ('style_starttext', odf.style.ParagraphProperties, 'textalign'),
 
])
 
def test_ods_writer_style(ods_writer, attr_name, child_type, checked_attr):
 
    if child_type is odf.style.TableColumnProperties:
 
        root = ods_writer.document.automaticstyles
 
    else:
 
        root = ods_writer.document.styles
 
    root = ods_writer.document.styles
 
    style = getattr(ods_writer, attr_name)
 
    actual = get_child(root, odf.style.Style, name=style.getAttribute('name'))
 
    assert actual is style
 
    child = get_child(actual, child_type)
 
    assert child.getAttribute(checked_attr)
 

	
 
def test_ods_writer_merge_styles(ods_writer):
 
    style = ods_writer.merge_styles(ods_writer.style_bold, ods_writer.style_dividerline)
 
    actual = get_child(
 
        ods_writer.document.styles,
 
        odf.style.Style,
 
        name=style.getAttribute('name'),
 
    )
 
    assert actual is style
 
    assert actual.getAttribute('family') == 'table-cell'
 
    text_props = get_child(actual, odf.style.TextProperties)
 
    assert text_props.getAttribute('fontweight') == 'bold'
 
    cell_props = get_child(actual, odf.style.TableCellProperties)
 
    assert cell_props.getAttribute('borderbottom')
 

	
 
def test_ods_writer_merge_styles_with_children_and_attributes(ods_writer):
 
    jpy_style = ods_writer.currency_style('JPY')
 
    style = ods_writer.merge_styles(ods_writer.style_bold, jpy_style)
 
    actual = get_child(
 
        ods_writer.document.styles,
 
        odf.style.Style,
 
        name=style.getAttribute('name'),
 
    )
 
    assert actual is style
 
    assert actual.getAttribute('family') == 'table-cell'
 
    assert actual.getAttribute('datastylename') == jpy_style.getAttribute('datastylename')
 
    text_props = get_child(actual, odf.style.TextProperties)
 
    assert text_props.getAttribute('fontweight') == 'bold'
 

	
 
def test_ods_writer_merge_styles_caches(ods_writer):
 
    sources = [ods_writer.style_bold, ods_writer.style_dividerline]
 
    style1 = ods_writer.merge_styles(*sources)
 
    style2 = ods_writer.merge_styles(*reversed(sources))
 
    assert style1 is style2
 
    assert get_child(
 
        ods_writer.document.styles,
 
        odf.style.Style,
 
        name=style1.getAttribute('name'),
 
    )
 

	
 
def test_ods_writer_layer_merge_styles(ods_writer):
 
    usd_style = ods_writer.currency_style('USD')
 
    layer1 = ods_writer.merge_styles(ods_writer.style_bold, ods_writer.style_dividerline)
 
    layer2 = ods_writer.merge_styles(layer1, usd_style)
 
    style_name = layer2.getAttribute('name')
 
    assert style_name.count('Merge_') == 1
 
    actual = get_child(
 
        ods_writer.document.styles,
 
        odf.style.Style,
 
        name=style_name,
 
    )
 
    assert actual is layer2
 
    assert actual.getAttribute('family') == 'table-cell'
 
    assert actual.getAttribute('datastylename') == usd_style.getAttribute('datastylename')
 
    text_props = get_child(actual, odf.style.TextProperties)
 
    assert text_props.getAttribute('fontweight') == 'bold'
 
    cell_props = get_child(actual, odf.style.TableCellProperties)
 
    assert cell_props.getAttribute('borderbottom')
 

	
 
def test_ods_writer_merge_one_style(ods_writer):
 
    actual = ods_writer.merge_styles(None, ods_writer.style_bold)
 
    assert actual is ods_writer.style_bold
 

	
 
def test_ods_writer_merge_no_styles(ods_writer):
 
    assert ods_writer.merge_styles() is None
 

	
 
def test_ods_writer_merge_nonexistent_style(ods_writer):
 
    name = 'Non Existent Style'
 
    with pytest.raises(ValueError, match=repr(name)):
 
        ods_writer.merge_styles(ods_writer.style_bold, name)
 

	
 
def test_ods_writer_merge_conflicting_styles(ods_writer):
 
    sources = [ods_writer.currency_style(code) for code in ['USD', 'EUR']]
 
    with pytest.raises(ValueError, match='conflicting datastylename'):
 
        ods_writer.merge_styles(*sources)
 

	
 
def test_ods_writer_date_style(ods_writer):
 
    data_style_name = ods_writer.style_date.getAttribute('datastylename')
 
    actual = get_child(
 
        ods_writer.document.styles,
 
        odf.style.Style,
 
        family='table-cell',
 
        datastylename=data_style_name,
 
    )
 
    assert actual is ods_writer.style_date
 
    data_style = get_child(
 
        ods_writer.document.styles,
 
        odf.number.DateStyle,
 
        name=data_style_name,
 
    )
 
    assert len(data_style.childNodes) == 5
 
    year, t1, month, t2, day = data_style.childNodes
 
    assert year.qname[1] == 'year'
 
    assert year.getAttribute('style') == 'long'
 
    assert get_text(t1) == '-'
 
    assert month.qname[1] == 'month'
 
    assert month.getAttribute('style') == 'long'
 
    assert get_text(t2) == '-'
 
    assert day.qname[1] == 'day'
 
    assert day.getAttribute('style') == 'long'
 

	
 
def test_ods_lock_first_row(ods_writer):
 
    ods_writer.lock_first_row()
 
    view_settings = get_child(
 
        ods_writer.document.settings,
 
        odf.config.ConfigItemSet,
 
        name='ooo:view-settings',
 
    )
 
    views = get_child(view_settings, odf.config.ConfigItemMapIndexed, name='Views')
 
    view1 = get_child(views, odf.config.ConfigItemMapEntry, index=0)
 
    config_map = get_child(view1, odf.config.ConfigItemMapNamed, name='Tables')
 
    sheet_name = ods_writer.sheet.getAttribute('name')
 
    config_entry = get_child(config_map, odf.config.ConfigItemMapEntry, name=sheet_name)
 
    for name, ctype, value in [
 
            ('PositionBottom', 'int', '1'),
 
            ('VerticalSplitMode', 'short', '2'),
 
            ('VerticalSplitPosition', 'short', '1'),
 
    ]:
 
        child = get_child(config_entry, odf.config.ConfigItem, name=name)
 
        assert child.getAttribute('type') == ctype
 
        assert child.firstChild.data == value
 

	
 
@pytest.mark.parametrize('style_name', XML_NAMES_LIST)
 
def test_ods_writer_add_row(ods_writer, style_name):
 
    cell1 = ods_writer.string_cell('one')
 
    cell2 = ods_writer.float_cell(42.0)
 
    row = ods_writer.add_row(cell1, cell2, defaultcellstylename=style_name)
 
    assert ods_writer.sheet.lastChild is row
 
    assert row.getAttribute('defaultcellstylename') == style_name
 
    assert row.firstChild is cell1
 
    assert row.lastChild is cell2
 

	
 
def test_ods_writer_add_row_single_cell(ods_writer):
 
    cell = ods_writer.multilink_cell(LINK_CELL_DATA[:1])
 
    row = ods_writer.add_row(cell)
 
    assert ods_writer.sheet.lastChild is row
 
    assert row.firstChild is cell
 
    assert row.lastChild is cell
 

	
 
def test_ods_writer_add_row_empty(ods_writer):
 
    row = ods_writer.add_row(stylename='blank')
 
    assert ods_writer.sheet.lastChild is row
 
    assert row.firstChild is None
 
    assert row.getAttribute('stylename') == 'blank'
 

	
 
def test_ods_writer_balance_cell_empty(ods_writer):
 
    balance = core.Balance()
 
    cell = ods_writer.balance_cell(balance)
 
    assert cell.value_type != 'string'
 
    assert float(cell.value) == 0
 

	
 
def test_ods_writer_balance_cell_single_currency(ods_writer):
 
    number = 250
 
    currency = 'EUR'
 
    balance = core.Balance([testutil.Amount(number, currency)])
 
    cell = ods_writer.balance_cell(balance)
 
    assert cell.value_type == 'currency'
 
    assert Decimal(cell.value) == number
 
    assert cell.text == babel.numbers.format_currency(
 
        number, currency, locale=EN_US, format_type='accounting',
 
    )
 

	
 
def test_ods_writer_balance_cell_multi_currency(ods_writer):
 
    amounts = [testutil.Amount(num, code) for num, code in [
 
        (2500, 'RUB'),
 
        (3500, 'BRL'),
 
    ]]
 
    balance = core.Balance(amounts)
 
    cell = ods_writer.balance_cell(balance)
 
    assert cell.text == '\0'.join(babel.numbers.format_currency(
 
        number, currency, locale=EN_US, format_type='accounting',
 
    ) for number, currency in amounts)
 

	
 
@pytest.mark.parametrize('cell_source,style_name', testutil.combine_values(
 
    CURRENCY_CELL_DATA,
 
    XML_NAMES,
 
))
 
def test_ods_writer_currency_cell(ods_writer, cell_source, style_name):
 
    cell = ods_writer.currency_cell(cell_source, stylename=style_name)
 
    number, currency = cell_source
 
    assert cell.getAttribute('valuetype') == 'currency'
 
    assert cell.getAttribute('value') == str(number)
 
    assert cell.getAttribute('stylename') == style_name
 
    expected = babel.numbers.format_currency(
 
        number, currency, locale=EN_US, format_type='accounting',
 
    )
 
    assert get_text(cell) == expected
 

	
 
@pytest.mark.parametrize('currency', [
    'EUR',
    'CHF',
    'GBP',
])
def test_ods_writer_currency_cell_default_style(ods_writer, currency):
    amount = testutil.Amount(1000, currency)
    expected_stylename = ods_writer.currency_style(currency).getAttribute('name')
    cell = ods_writer.currency_cell(amount)
    assert cell.getAttribute('valuetype') == 'currency'
    assert cell.getAttribute('value') == '1000'
    assert cell.getAttribute('stylename') == expected_stylename
 

	
 
@pytest.mark.parametrize('date,style_name', testutil.combine_values(
    [datetime.date(1980, 2, 5), datetime.date(2030, 10, 30)],
    XML_NAMES_LIST,
))
def test_ods_writer_date_cell(ods_writer, date, style_name):
    if style_name is None:
        expect_style = ods_writer.style_date.getAttribute('name')
        cell = ods_writer.date_cell(date)
    else:
        expect_style = style_name
        cell = ods_writer.date_cell(date, stylename=style_name)
    date_s = date.isoformat()
    assert cell.getAttribute('valuetype') == 'date'
    assert cell.getAttribute('datevalue') == date_s
    assert cell.getAttribute('stylename') == expect_style
    assert get_text(cell) == date_s
 

	
 
@pytest.mark.parametrize('cell_source,style_name', testutil.combine_values(
    NUMERIC_CELL_DATA,
    XML_NAMES,
))
def test_ods_writer_float_cell(ods_writer, cell_source, style_name):
    cell = ods_writer.float_cell(cell_source, stylename=style_name)
    assert cell.getAttribute('valuetype') == 'float'
    assert cell.getAttribute('stylename') == style_name
    expected = str(cell_source)
    assert cell.getAttribute('value') == expected
    assert get_text(cell) == expected
 

	
 
def test_ods_writer_meta_links_cell(ods_writer):
    rt_client = testutil.RTClient()
    ods_writer.rt_wrapper = rtutil.RT(rt_client)
    rt_url = rt_client.DEFAULT_URL[:-10]
    # Exercise each link form the cell handles: full rt:// ticket and
    # attachment URLs, the short rt:ticket/attachment notation, and a
    # plain repository path.
    meta_links = [
        'rt://ticket/1',
        'rt://ticket/2/attachments/9',
        'rt:1/5',
        'Invoices/0123.pdf',
    ]
    cell = ods_writer.meta_links_cell(meta_links, stylename='meta1')
    assert cell.getAttribute('valuetype') == 'string'
    assert cell.getAttribute('stylename') == 'meta1'
    children = iter(get_children(cell, odf.text.A))
    child = next(children)
    assert child.getAttribute('type') == 'simple'
    expect_url = f'{rt_url}/Ticket/Display.html?id=1'
    assert child.getAttribute('href') == expect_url
    assert get_text(child) == 'rt:1'
    child = next(children)
    assert child.getAttribute('type') == 'simple'
    expect_url = f'{rt_url}/Ticket/Display.html?id=2#txn-7'
    assert child.getAttribute('href') == expect_url
    assert get_text(child) == 'rt:2/9'
    child = next(children)
    assert child.getAttribute('type') == 'simple'
    expect_url = f'{rt_url}/Ticket/Attachment/1/5/photo.jpg'
    assert child.getAttribute('href') == expect_url
    assert get_text(child) == 'photo.jpg'
    child = next(children)
    assert child.getAttribute('type') == 'simple'
    expect_url = f'../{meta_links[3]}'
    assert child.getAttribute('href') == expect_url
    assert get_text(child) == '0123.pdf'
 

	
 
def test_ods_writer_multiline_cell(ods_writer):
    cell = ods_writer.multiline_cell(iter(STRING_CELL_DATA))
    assert cell.getAttribute('valuetype') == 'string'
    children = get_children(cell, odf.text.P)
    for expected, child in itertools.zip_longest(STRING_CELL_DATA, children):
        assert get_text(child) == expected
 

	
 
@pytest.mark.parametrize('cell_source,style_name', testutil.combine_values(
    LINK_CELL_DATA,
    XML_NAMES,
))
def test_ods_writer_multilink_singleton(ods_writer, cell_source, style_name):
    cell = ods_writer.multilink_cell([cell_source], stylename=style_name)
    assert cell.getAttribute('valuetype') == 'string'
    assert cell.getAttribute('stylename') == style_name
    try:
        href, text = cell_source
    except ValueError:
        href = cell_source
        text = None
    anchor = get_child(cell, odf.text.A, type='simple', href=href)
    assert get_text(anchor) == (text or href)
 

	
 
def test_ods_writer_multilink_cell(ods_writer):
    cell = ods_writer.multilink_cell(iter(LINK_CELL_DATA))
    assert cell.getAttribute('valuetype') == 'string'
    children = get_children(cell, odf.text.A)
    for source, child in itertools.zip_longest(LINK_CELL_DATA, children):
        try:
            href, text = source
        except ValueError:
            href = source
            text = None
        assert child.getAttribute('type') == 'simple'
        assert child.getAttribute('href') == href
        assert get_text(child) == (text or href)
 

	
 
@pytest.mark.parametrize('cell_source,style_name', testutil.combine_values(
    STRING_CELL_DATA,
    XML_NAMES,
))
def test_ods_writer_string_cell(ods_writer, cell_source, style_name):
    cell = ods_writer.string_cell(cell_source, stylename=style_name)
    assert cell.getAttribute('valuetype') == 'string'
    assert cell.getAttribute('stylename') == style_name
    assert get_text(cell) == str(cell_source)
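
# Illustrative sketch only, not part of this changeset: a rough idea of how
# the cell helpers and add_row() exercised above compose when a report fills
# in a sheet.  `writer` stands in for a BaseODS-style writer such as the
# ods_writer fixture; the function name is made up for the example.
def write_example_rows(writer):
    # A header row of plain string cells.
    writer.add_row(
        writer.string_cell('Date'),
        writer.string_cell('Description'),
        writer.string_cell('Amount'),
    )
    # A data row of typed cells; currency_cell falls back to the writer's
    # per-currency default style when no stylename is passed, as tested above.
    writer.add_row(
        writer.date_cell(datetime.date(2020, 6, 17)),
        writer.string_cell('Example payable'),
        writer.currency_cell(testutil.Amount(1000, 'USD')),
    )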