Update all files to use LF (windows)

Martin Tan 2023-08-02 13:14:16 +08:00
parent 17d69224d9
commit e9ae9ad2eb
13 changed files with 4489 additions and 4489 deletions

File diff suppressed because it is too large.


@@ -1,64 +1,64 @@
import dataclasses
import datetime
import enum
from pathlib import Path
from typing import List
from typing import Optional

import magic


@dataclasses.dataclass
class DocumentMetadataOverrides:
    """
    Manages overrides for document fields which normally would
    be set from content or matching. All fields default to None,
    meaning no override is happening
    """

    filename: Optional[str] = None
    title: Optional[str] = None
    correspondent_id: Optional[int] = None
    document_type_id: Optional[int] = None
    tag_ids: Optional[List[int]] = None
    created: Optional[datetime.datetime] = None
    asn: Optional[int] = None
    owner_id: Optional[int] = None
    storage_path_id: Optional[int] = None
    full_path: Optional[str] = None


class DocumentSource(enum.IntEnum):
    """
    The source of an incoming document. May have other uses in the future
    """

    ConsumeFolder = enum.auto()
    ApiUpload = enum.auto()
    MailFetch = enum.auto()


@dataclasses.dataclass
class ConsumableDocument:
    """
    Encapsulates an incoming document, either from consume folder, API upload
    or mail fetching and certain useful operations on it.
    """

    source: DocumentSource
    original_file: Path
    mime_type: str = dataclasses.field(init=False, default=None)

    def __post_init__(self):
        """
        After a dataclass is initialized, this is called to finalize some data
        1. Make sure the original path is an absolute, fully qualified path
        2. Get the mime type of the file
        """
        # Always fully qualify the path first thing
        # Just in case, convert to a path if it's a str
        self.original_file = Path(self.original_file).resolve()

        # Get the file type once at init
        # Note this function isn't called when the object is unpickled
        self.mime_type = magic.from_file(self.original_file, mime=True)
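
For reference, a minimal usage sketch of the two dataclasses above (the file path and override values are hypothetical, not part of this commit):

    # Hypothetical values; __post_init__ resolves the path and probes the mime type.
    doc = ConsumableDocument(
        source=DocumentSource.ApiUpload,
        original_file=Path("uploads/invoice.pdf"),
    )
    print(doc.mime_type)  # e.g. "application/pdf", as reported by libmagic

    # Override fields default to None, so only the fields set here take effect.
    overrides = DocumentMetadataOverrides(title="Invoice", asn=42)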


@@ -1,159 +1,159 @@
from django.db.models import Q
from django_filters.rest_framework import BooleanFilter
from django_filters.rest_framework import Filter
from django_filters.rest_framework import FilterSet
from rest_framework_guardian.filters import ObjectPermissionsFilter

from .models import Correspondent
from .models import Document
from .models import DocumentType
from .models import Log
from .models import StoragePath
from .models import Tag

CHAR_KWARGS = ["istartswith", "iendswith", "icontains", "iexact"]
ID_KWARGS = ["in", "exact"]
INT_KWARGS = ["exact", "gt", "gte", "lt", "lte", "isnull"]
DATE_KWARGS = ["year", "month", "day", "date__gt", "gt", "date__lt", "lt"]


class CorrespondentFilterSet(FilterSet):
    class Meta:
        model = Correspondent
        fields = {"name": CHAR_KWARGS}


class TagFilterSet(FilterSet):
    class Meta:
        model = Tag
        fields = {"name": CHAR_KWARGS}


class DocumentTypeFilterSet(FilterSet):
    class Meta:
        model = DocumentType
        fields = {"name": CHAR_KWARGS}


class ObjectFilter(Filter):
    def __init__(self, exclude=False, in_list=False, field_name=""):
        super().__init__()
        self.exclude = exclude
        self.in_list = in_list
        self.field_name = field_name

    def filter(self, qs, value):
        if not value:
            return qs
        try:
            object_ids = [int(x) for x in value.split(",")]
        except ValueError:
            return qs
        if self.in_list:
            qs = qs.filter(**{f"{self.field_name}__id__in": object_ids}).distinct()
        else:
            for obj_id in object_ids:
                if self.exclude:
                    qs = qs.exclude(**{f"{self.field_name}__id": obj_id})
                else:
                    qs = qs.filter(**{f"{self.field_name}__id": obj_id})
        return qs


class InboxFilter(Filter):
    def filter(self, qs, value):
        if value == "true":
            return qs.filter(tags__is_inbox_tag=True)
        elif value == "false":
            return qs.exclude(tags__is_inbox_tag=True)
        else:
            return qs


class TitleContentFilter(Filter):
    def filter(self, qs, value):
        if value:
            return qs.filter(Q(title__icontains=value) | Q(content__icontains=value))
        else:
            return qs


class DocumentFilterSet(FilterSet):
    is_tagged = BooleanFilter(
        label="Is tagged",
        field_name="tags",
        lookup_expr="isnull",
        exclude=True,
    )
    tags__id__all = ObjectFilter(field_name="tags")
    tags__id__none = ObjectFilter(field_name="tags", exclude=True)
    tags__id__in = ObjectFilter(field_name="tags", in_list=True)
    correspondent__id__none = ObjectFilter(field_name="correspondent", exclude=True)
    document_type__id__none = ObjectFilter(field_name="document_type", exclude=True)
    storage_path__id__none = ObjectFilter(field_name="storage_path", exclude=True)
    is_in_inbox = InboxFilter()
    title_content = TitleContentFilter()

    class Meta:
        model = Document
        fields = {
            "title": CHAR_KWARGS,
            "content": CHAR_KWARGS,
            "archive_serial_number": INT_KWARGS,
            "created": DATE_KWARGS,
            "added": DATE_KWARGS,
            "modified": DATE_KWARGS,
            "correspondent": ["isnull"],
            "correspondent__id": ID_KWARGS,
            "correspondent__name": CHAR_KWARGS,
            "tags__id": ID_KWARGS,
            "tags__name": CHAR_KWARGS,
            "document_type": ["isnull"],
            "document_type__id": ID_KWARGS,
            "document_type__name": CHAR_KWARGS,
            "storage_path": ["isnull"],
            "storage_path__id": ID_KWARGS,
            "storage_path__name": CHAR_KWARGS,
        }


class LogFilterSet(FilterSet):
    class Meta:
        model = Log
        fields = {"level": INT_KWARGS, "created": DATE_KWARGS, "group": ID_KWARGS}


class StoragePathFilterSet(FilterSet):
    class Meta:
        model = StoragePath
        fields = {
            "name": CHAR_KWARGS,
            "path": CHAR_KWARGS,
        }


class ObjectOwnedOrGrantedPermissionsFilter(ObjectPermissionsFilter):
    """
    A filter backend that limits results to those where the requesting user
    has read object level permissions, owns the objects, or objects without
    an owner (for backwards compat)
    """

    def filter_queryset(self, request, queryset, view):
        objects_with_perms = super().filter_queryset(request, queryset, view)
        objects_owned = queryset.filter(owner=request.user)
        objects_unowned = queryset.filter(owner__isnull=True)
        return objects_with_perms | objects_owned | objects_unowned
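
As a rough illustration of how ObjectFilter interprets its comma-separated value (the ids below are made up): filters are chained per id unless in_list is set, so tags__id__all requires every listed tag while tags__id__in matches any of them:

    # Chained filtering: documents must carry tag 1 AND tag 2,
    # equivalent to Document.objects.filter(tags__id=1).filter(tags__id=2)
    f_all = ObjectFilter(field_name="tags")
    qs_all = f_all.filter(Document.objects.all(), "1,2")

    # Single __in lookup: documents carrying tag 1 OR tag 2, deduplicated
    f_any = ObjectFilter(field_name="tags", in_list=True)
    qs_any = f_any.filter(Document.objects.all(), "1,2")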


@@ -1,343 +1,343 @@
import logging
import math
import os
from contextlib import contextmanager

from dateutil.parser import isoparse
from django.conf import settings
from django.utils import timezone
from documents.models import Document, Metadata
from documents.models import Note
from guardian.shortcuts import get_users_with_perms
from whoosh import classify
from whoosh import highlight
from whoosh import query
from whoosh.fields import BOOLEAN
from whoosh.fields import DATETIME
from whoosh.fields import KEYWORD
from whoosh.fields import NUMERIC
from whoosh.fields import Schema
from whoosh.fields import TEXT
from whoosh.highlight import HtmlFormatter
from whoosh.index import create_in
from whoosh.index import exists_in
from whoosh.index import open_dir
from whoosh.qparser import MultifieldParser
from whoosh.qparser.dateparse import DateParserPlugin
from whoosh.searching import ResultsPage
from whoosh.searching import Searcher
from whoosh.writing import AsyncWriter

logger = logging.getLogger("paperless.index")


def get_schema():
    return Schema(
        id=NUMERIC(stored=True, unique=True),
        title=TEXT(sortable=True),
        content=TEXT(),
        asn=NUMERIC(sortable=True, signed=False),
        correspondent=TEXT(sortable=True),
        correspondent_id=NUMERIC(),
        has_correspondent=BOOLEAN(),
        tag=KEYWORD(commas=True, scorable=True, lowercase=True),
        tag_id=KEYWORD(commas=True, scorable=True),
        has_tag=BOOLEAN(),
        type=TEXT(sortable=True),
        type_id=NUMERIC(),
        has_type=BOOLEAN(),
        created=DATETIME(sortable=True),
        modified=DATETIME(sortable=True),
        added=DATETIME(sortable=True),
        path=TEXT(sortable=True),
        path_id=NUMERIC(),
        has_path=BOOLEAN(),
        notes=TEXT(),
        metadatas=TEXT(),
        owner=TEXT(),
        owner_id=NUMERIC(),
        has_owner=BOOLEAN(),
        viewer_id=KEYWORD(commas=True),
    )


def open_index(recreate=False):
    try:
        if exists_in(settings.INDEX_DIR) and not recreate:
            return open_dir(settings.INDEX_DIR, schema=get_schema())
    except Exception:
        logger.exception("Error while opening the index, recreating.")

    if not os.path.isdir(settings.INDEX_DIR):
        os.makedirs(settings.INDEX_DIR, exist_ok=True)
    return create_in(settings.INDEX_DIR, get_schema())


@contextmanager
def open_index_writer(optimize=False):
    writer = AsyncWriter(open_index())
    try:
        yield writer
    except Exception as e:
        logger.exception(str(e))
        writer.cancel()
    finally:
        writer.commit(optimize=optimize)


@contextmanager
def open_index_searcher():
    searcher = open_index().searcher()
    try:
        yield searcher
    finally:
        searcher.close()


def update_document(writer: AsyncWriter, doc: Document):
    tags = ",".join([t.name for t in doc.tags.all()])
    tags_ids = ",".join([str(t.id) for t in doc.tags.all()])
    notes = ",".join([str(c.note) for c in Note.objects.filter(document=doc)])
    latest_metadata = Metadata.objects.filter(document=doc).order_by('-created').first()
    metadatas = str(latest_metadata) if latest_metadata else ''
    asn = doc.archive_serial_number
    if asn is not None and (
        asn < Document.ARCHIVE_SERIAL_NUMBER_MIN
        or asn > Document.ARCHIVE_SERIAL_NUMBER_MAX
    ):
        logger.error(
            f"Not indexing Archive Serial Number {asn} of document {doc.pk}. "
            f"ASN is out of range "
            f"[{Document.ARCHIVE_SERIAL_NUMBER_MIN:,}, "
            f"{Document.ARCHIVE_SERIAL_NUMBER_MAX:,}.",
        )
        asn = 0
    users_with_perms = get_users_with_perms(
        doc,
        only_with_perms_in=["view_document"],
    )
    viewer_ids = ",".join([str(u.id) for u in users_with_perms])
    writer.update_document(
        id=doc.pk,
        title=doc.title,
        content=doc.content,
        correspondent=doc.correspondent.name if doc.correspondent else None,
        correspondent_id=doc.correspondent.id if doc.correspondent else None,
        has_correspondent=doc.correspondent is not None,
        tag=tags if tags else None,
        tag_id=tags_ids if tags_ids else None,
        has_tag=len(tags) > 0,
        type=doc.document_type.name if doc.document_type else None,
        type_id=doc.document_type.id if doc.document_type else None,
        has_type=doc.document_type is not None,
        created=doc.created,
        added=doc.added,
        asn=asn,
        modified=doc.modified,
        path=doc.storage_path.name if doc.storage_path else None,
        path_id=doc.storage_path.id if doc.storage_path else None,
        has_path=doc.storage_path is not None,
        notes=notes,
        # metadatas=metadatas,
        owner=doc.owner.username if doc.owner else None,
        owner_id=doc.owner.id if doc.owner else None,
        has_owner=doc.owner is not None,
        viewer_id=viewer_ids if viewer_ids else None,
    )


def remove_document(writer, doc):
    remove_document_by_id(writer, doc.pk)


def remove_document_by_id(writer, doc_id):
    writer.delete_by_term("id", doc_id)


def add_or_update_document(document):
    with open_index_writer() as writer:
        update_document(writer, document)


def remove_document_from_index(document):
    with open_index_writer() as writer:
        remove_document(writer, document)


class DelayedQuery:
    def _get_query(self):
        raise NotImplementedError

    def _get_query_filter(self):
        criterias = []
        for k, v in self.query_params.items():
            if k == "correspondent__id":
                criterias.append(query.Term("correspondent_id", v))
            elif k == "tags__id__all":
                for tag_id in v.split(","):
                    criterias.append(query.Term("tag_id", tag_id))
            elif k == "tags__id__none":
                for tag_id in v.split(","):
                    criterias.append(query.Not(query.Term("tag_id", tag_id)))
            elif k == "document_type__id":
                criterias.append(query.Term("type_id", v))
            elif k == "correspondent__isnull":
                criterias.append(query.Term("has_correspondent", v == "false"))
            elif k == "is_tagged":
                criterias.append(query.Term("has_tag", v == "true"))
            elif k == "document_type__isnull":
                criterias.append(query.Term("has_type", v == "false"))
            elif k == "created__date__lt":
                criterias.append(
                    query.DateRange("created", start=None, end=isoparse(v)),
                )
            elif k == "created__date__gt":
                criterias.append(
                    query.DateRange("created", start=isoparse(v), end=None),
                )
            elif k == "added__date__gt":
                criterias.append(query.DateRange("added", start=isoparse(v), end=None))
            elif k == "added__date__lt":
                criterias.append(query.DateRange("added", start=None, end=isoparse(v)))
            elif k == "storage_path__id":
                criterias.append(query.Term("path_id", v))
            elif k == "storage_path__isnull":
                criterias.append(query.Term("has_path", v == "false"))

        user_criterias = [query.Term("has_owner", False)]
        if "user" in self.query_params:
            user_criterias.append(query.Term("owner_id", self.query_params["user"]))
            user_criterias.append(
                query.Term("viewer_id", str(self.query_params["user"])),
            )
        if len(criterias) > 0:
            criterias.append(query.Or(user_criterias))
            return query.And(criterias)
        else:
            return query.Or(user_criterias)

    def _get_query_sortedby(self):
        if "ordering" not in self.query_params:
            return None, False

        field: str = self.query_params["ordering"]

        sort_fields_map = {
            "created": "created",
            "modified": "modified",
            "added": "added",
            "title": "title",
            "correspondent__name": "correspondent",
            "document_type__name": "type",
            "archive_serial_number": "asn",
        }

        if field.startswith("-"):
            field = field[1:]
            reverse = True
        else:
            reverse = False

        if field not in sort_fields_map:
            return None, False
        else:
            return sort_fields_map[field], reverse

    def __init__(self, searcher: Searcher, query_params, page_size):
        self.searcher = searcher
        self.query_params = query_params
        self.page_size = page_size
        self.saved_results = dict()
        self.first_score = None

    def __len__(self):
        page = self[0:1]
        return len(page)

    def __getitem__(self, item):
        if item.start in self.saved_results:
            return self.saved_results[item.start]

        q, mask = self._get_query()
        sortedby, reverse = self._get_query_sortedby()

        page: ResultsPage = self.searcher.search_page(
            q,
            mask=mask,
            filter=self._get_query_filter(),
            pagenum=math.floor(item.start / self.page_size) + 1,
            pagelen=self.page_size,
            sortedby=sortedby,
            reverse=reverse,
        )
        page.results.fragmenter = highlight.ContextFragmenter(surround=50)
        page.results.formatter = HtmlFormatter(tagname="span", between=" ... ")

        if not self.first_score and len(page.results) > 0 and sortedby is None:
            self.first_score = page.results[0].score

        page.results.top_n = list(
            map(
                lambda hit: (
                    (hit[0] / self.first_score) if self.first_score else None,
                    hit[1],
                ),
                page.results.top_n,
            ),
        )

        self.saved_results[item.start] = page

        return page


class DelayedFullTextQuery(DelayedQuery):
    def _get_query(self):
        q_str = self.query_params["query"]
        qp = MultifieldParser(
            ["content", "title", "correspondent", "tag", "type", "notes", "metadatas"],
            self.searcher.ixreader.schema,
        )
        qp.add_plugin(DateParserPlugin(basedate=timezone.now()))
        q = qp.parse(q_str)

        corrected = self.searcher.correct_query(q, q_str)
        if corrected.query != q:
            corrected.query = corrected.string

        return q, None


class DelayedMoreLikeThisQuery(DelayedQuery):
    def _get_query(self):
        more_like_doc_id = int(self.query_params["more_like_id"])
        content = Document.objects.get(id=more_like_doc_id).content

        docnum = self.searcher.document_number(id=more_like_doc_id)
        kts = self.searcher.key_terms_from_text(
            "content",
            content,
            numterms=20,
            model=classify.Bo1Model,
            normalize=False,
        )
        q = query.Or(
            [query.Term("content", word, boost=weight) for word, weight in kts],
        )
        mask = {docnum}

        return q, mask


def autocomplete(ix, term, limit=10):
    with ix.reader() as reader:
        terms = []
        for (score, t) in reader.most_distinctive_terms(
            "content",
            number=limit,
            prefix=term.lower(),
        ):
            terms.append(t)
    return terms
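
A short sketch of how these helpers are typically combined from outside this module (the document and search prefix are hypothetical):

    from documents import index
    from documents.models import Document

    # Index (or re-index) one document; the context manager commits the writer.
    doc = Document.objects.first()
    if doc is not None:
        index.add_or_update_document(doc)

    # Suggest up to five distinctive content terms starting with "inv".
    ix = index.open_index()
    print(index.autocomplete(ix, "inv", limit=5))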


@@ -1,69 +1,69 @@
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        ("documents", "1035_rename_comment_note"),
    ]

    operations = [
        migrations.CreateModel(
            name="Metadata",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "data",
                    models.JSONField(
                        blank=True,
                        help_text="JSON metadata",
                        verbose_name="data",
                    ),
                ),
                (
                    "created",
                    models.DateTimeField(
                        db_index=True,
                        default=django.utils.timezone.now,
                        verbose_name="created",
                    ),
                ),
                (
                    "document",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="metadatas",
                        to="documents.document",
                        verbose_name="document",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="metadatas",
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="user",
                    ),
                ),
            ],
            options={
                "verbose_name": "metadata",
                "verbose_name_plural": "metadatas",
                "ordering": ("created",),
            },
        ),
    ]
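
Assuming the model this migration creates (and a Document instance doc), the newest metadata row can be fetched the same way index.update_document does above; a sketch, not part of the diff:

    # Latest Metadata row for a document; data is the JSONField payload.
    latest = Metadata.objects.filter(document=doc).order_by("-created").first()
    data = latest.data if latest else {}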


@@ -1,24 +1,24 @@
# Generated by Django 4.1.7 on 2023-07-23 17:36
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('documents', '1036_add_metadata'),
    ]

    operations = [
        migrations.AddField(
            model_name='documenttype',
            name='default_metadata',
            field=models.JSONField(blank=True, help_text='Default JSON metadata', null=True, verbose_name='default_metadata'),
        ),
        migrations.AlterField(
            model_name='metadata',
            name='document',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document', to='documents.document', verbose_name='document'),
        ),
    ]


@@ -1,19 +1,19 @@
# Generated by Django 4.1.7 on 2023-07-27 02:44
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('documents', '1037_alter_documenttype_add_default_metadata'),
    ]

    operations = [
        migrations.AlterField(
            model_name='metadata',
            name='document',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='metadatas', to='documents.document', verbose_name='document'),
        ),
    ]

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,305 +1,305 @@
import hashlib
import logging
import shutil
import uuid
from typing import Optional
from typing import Type

import tqdm
from asgiref.sync import async_to_sync
from celery import shared_task
from channels.layers import get_channel_layer
from django.conf import settings
from django.db import transaction
from django.db.models.signals import post_save
from documents import barcodes
from documents import index
from documents import sanity_checker
from documents.classifier import DocumentClassifier
from documents.classifier import load_classifier
from documents.consumer import Consumer
from documents.consumer import ConsumerError
from documents.data_models import ConsumableDocument
from documents.data_models import DocumentMetadataOverrides
from documents.data_models import DocumentSource
from documents.file_handling import create_source_path_directory
from documents.file_handling import generate_unique_filename
from documents.models import Correspondent
from documents.models import Document
from documents.models import DocumentType
from documents.models import StoragePath
from documents.models import Tag
from documents.parsers import DocumentParser
from documents.parsers import get_parser_class_for_mime_type
from documents.sanity_checker import SanityCheckFailedException
from filelock import FileLock
from redis.exceptions import ConnectionError
from whoosh.writing import AsyncWriter

logger = logging.getLogger("paperless.tasks")


@shared_task
def index_optimize():
    ix = index.open_index()
    writer = AsyncWriter(ix)
    writer.commit(optimize=True)


def index_reindex(progress_bar_disable=False):
    documents = Document.objects.all()

    ix = index.open_index(recreate=True)

    with AsyncWriter(ix) as writer:
        for document in tqdm.tqdm(documents, disable=progress_bar_disable):
            index.update_document(writer, document)


@shared_task
def train_classifier():
    if (
        not Tag.objects.filter(matching_algorithm=Tag.MATCH_AUTO).exists()
        and not DocumentType.objects.filter(matching_algorithm=Tag.MATCH_AUTO).exists()
        and not Correspondent.objects.filter(matching_algorithm=Tag.MATCH_AUTO).exists()
        and not StoragePath.objects.filter(matching_algorithm=Tag.MATCH_AUTO).exists()
    ):
        return

    classifier = load_classifier()

    if not classifier:
        classifier = DocumentClassifier()

    try:
        if classifier.train():
            logger.info(
                f"Saving updated classifier model to {settings.MODEL_FILE}...",
            )
            classifier.save()
        else:
            logger.debug("Training data unchanged.")
    except Exception as e:
        logger.warning("Classifier error: " + str(e))


@shared_task
def consume_file(
    input_doc: ConsumableDocument,
    overrides: Optional[DocumentMetadataOverrides] = None,
):
    # Default no overrides
    if overrides is None:
        overrides = DocumentMetadataOverrides()

    # read all barcodes in the current document
    if settings.CONSUMER_ENABLE_BARCODES or settings.CONSUMER_ENABLE_ASN_BARCODE:
        doc_barcode_info = barcodes.scan_file_for_barcodes(
            input_doc.original_file,
            input_doc.mime_type,
        )

        # split document by separator pages, if enabled
        if settings.CONSUMER_ENABLE_BARCODES:
            separators = barcodes.get_separating_barcodes(doc_barcode_info.barcodes)

            if len(separators) > 0:
                logger.debug(
                    f"Pages with separators found in: {input_doc.original_file}",
                )
                document_list = barcodes.separate_pages(
                    doc_barcode_info.pdf_path,
                    separators,
                )

                if document_list:
                    # If the file is an upload, it's in the scratch directory
                    # Move it to consume directory to be picked up
                    # Otherwise, use the current parent to keep possible tags
                    # from subdirectories
                    if input_doc.source != DocumentSource.ConsumeFolder:
                        save_to_dir = settings.CONSUMPTION_DIR
                    else:
                        # Note this uses the original file, because it's in the
                        # consume folder already and may include additional path
                        # components for tagging
                        # the .path is somewhere in scratch in this case
                        save_to_dir = input_doc.original_file.parent

                    for n, document in enumerate(document_list):
                        # save to consumption dir
                        # rename it to the original filename with number prefix
                        if overrides.filename is not None:
                            newname = f"{str(n)}_{overrides.filename}"
                        else:
                            newname = None

                        barcodes.save_to_dir(
                            document,
                            newname=newname,
                            target_dir=save_to_dir,
                        )

                        # Split file has been copied safely, remove it
                        document.unlink()

                    # And clean up the directory as well, now it's empty
                    shutil.rmtree(document_list[0].parent)

                    # This file has been split into multiple files without issue
                    # remove the original and working copy
                    input_doc.original_file.unlink()

                    # If the original file was a TIFF, remove the PDF generated from it
                    if input_doc.mime_type == "image/tiff":
                        logger.debug(
                            f"Deleting file {doc_barcode_info.pdf_path}",
                        )
                        doc_barcode_info.pdf_path.unlink()

                    # notify the sender, otherwise the progress bar
                    # in the UI stays stuck
                    payload = {
                        "filename": overrides.filename or input_doc.original_file.name,
                        "task_id": None,
                        "current_progress": 100,
                        "max_progress": 100,
                        "status": "SUCCESS",
                        "message": "finished",
                    }
                    try:
                        async_to_sync(get_channel_layer().group_send)(
                            "status_updates",
                            {"type": "status_update", "data": payload},
                        )
                    except ConnectionError as e:
                        logger.warning(f"ConnectionError on status send: {str(e)}")
                    # consuming stops here, since the original document with
                    # the barcodes has been split and will be consumed separately
                    return "File successfully split"

        # try reading the ASN from barcode
        if settings.CONSUMER_ENABLE_ASN_BARCODE:
            overrides.asn = barcodes.get_asn_from_barcodes(doc_barcode_info.barcodes)
            if overrides.asn:
                logger.info(f"Found ASN in barcode: {overrides.asn}")

    # continue with consumption if no barcode was found
    document = Consumer().try_consume_file(
        input_doc.original_file,
        override_filename=overrides.filename,
        override_title=overrides.title,
        override_correspondent_id=overrides.correspondent_id,
        override_document_type_id=overrides.document_type_id,
        override_tag_ids=overrides.tag_ids,
        override_created=overrides.created,
        override_asn=overrides.asn,
        override_owner_id=overrides.owner_id,
        override_storage_path_id=overrides.storage_path_id,
        full_path=overrides.full_path,
    )

    if document:
        return f"Success. New document id {document.pk} created"
    else:
        raise ConsumerError(
            "Unknown error: Returned document was null, but "
            "no error message was given.",
        )


@shared_task
def sanity_check():
    messages = sanity_checker.check_sanity()

    messages.log_messages()

    if messages.has_error:
        raise SanityCheckFailedException("Sanity check failed with errors. See log.")
    elif messages.has_warning:
        return "Sanity check exited with warnings. See log."
    elif len(messages) > 0:
        return "Sanity check exited with infos. See log."
    else:
        return "No issues detected."


@shared_task
def bulk_update_documents(document_ids):
    documents = Document.objects.filter(id__in=document_ids)

    ix = index.open_index()

    for doc in documents:
        post_save.send(Document, instance=doc, created=False)

    with AsyncWriter(ix) as writer:
        for doc in documents:
            index.update_document(writer, doc)


@shared_task
def update_document_archive_file(document_id):
    """
    Re-creates the archive file of a document, including new OCR content and thumbnail
    """
    document = Document.objects.get(id=document_id)

    mime_type = document.mime_type

    parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type)

    if not parser_class:
        logger.error(
            f"No parser found for mime type {mime_type}, cannot "
            f"archive document {document} (ID: {document_id})",
        )
        return

    parser: DocumentParser = parser_class(logging_group=uuid.uuid4())

    try:
        parser.parse(document.source_path, mime_type, document.get_public_filename())

        thumbnail = parser.get_thumbnail(
            document.source_path,
            mime_type,
            document.get_public_filename(),
        )

        if parser.get_archive_path():
            with transaction.atomic():
                with open(parser.get_archive_path(), "rb") as f:
                    checksum = hashlib.md5(f.read()).hexdigest()
                # I'm going to save first so that in case the file move
                # fails, the database is rolled back.
                # We also don't use save() since that triggers the filehandling
                # logic, and we don't want that yet (file not yet in place)
                document.archive_filename = generate_unique_filename(
                    document,
                    archive_filename=True,
                )
                Document.objects.filter(pk=document.pk).update(
                    archive_checksum=checksum,
                    content=parser.get_text(),
                    archive_filename=document.archive_filename,
                )
                with FileLock(settings.MEDIA_LOCK):
                    create_source_path_directory(document.archive_path)
                    shutil.move(parser.get_archive_path(), document.archive_path)
                    shutil.move(thumbnail, document.thumbnail_path)

        with index.open_index_writer() as writer:
            index.update_document(writer, document)

    except Exception:
        logger.exception(
            f"Error while parsing document {document} (ID: {document_id})",
        )
    finally:
        parser.cleanup()
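
A hypothetical dispatch of the consume task, pairing the data models with a Celery call (the path and ASN are made up, and the real caller may serialize the dataclass arguments differently depending on the project's Celery configuration):

    from pathlib import Path

    # Queue a consume-folder file for consumption with an ASN override.
    consume_file.delay(
        ConsumableDocument(
            source=DocumentSource.ConsumeFolder,
            original_file=Path("/consume/scan_0001.pdf"),
        ),
        DocumentMetadataOverrides(asn=1001),
    )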


@@ -1,48 +1,48 @@
<!DOCTYPE html> <!DOCTYPE html>
{% load static %} {% load i18n %} {% load static %} {% load i18n %}
<html lang="en"> <html lang="en">
<head> <head>
<meta charset="utf-8" /> <meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" /> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" />
<meta name="description" content="" /> <meta name="description" content="" />
<meta name="author" content="Mark Otto, Jacob Thornton, and Bootstrap contributors" /> <meta name="author" content="Mark Otto, Jacob Thornton, and Bootstrap contributors" />
<meta name="generator" content="Jekyll v4.1.1" /> <meta name="generator" content="Jekyll v4.1.1" />
<meta name="robots" content="noindex,nofollow" /> <meta name="robots" content="noindex,nofollow" />
<title>{% translate "Paperless-ngx signed out" %}</title> <title>{% translate "Paperless-ngx signed out" %}</title>
<!-- Bootstrap core CSS --> <!-- Bootstrap core CSS -->
<link href="{% static 'bootstrap.min.css' %}" rel="stylesheet" /> <link href="{% static 'bootstrap.min.css' %}" rel="stylesheet" />
<style> <style>
.bd-placeholder-img { .bd-placeholder-img {
font-size: 1.125rem; font-size: 1.125rem;
text-anchor: middle; text-anchor: middle;
-webkit-user-select: none; -webkit-user-select: none;
-moz-user-select: none; -moz-user-select: none;
-ms-user-select: none; -ms-user-select: none;
user-select: none; user-select: none;
} }
@media (min-width: 768px) { @media (min-width: 768px) {
.bd-placeholder-img-lg { .bd-placeholder-img-lg {
font-size: 3.5rem; font-size: 3.5rem;
} }
} }
</style> </style>
<!-- Custom styles for this template --> <!-- Custom styles for this template -->
<link href="{% static 'signin.css' %}" rel="stylesheet" /> <link href="{% static 'signin.css' %}" rel="stylesheet" />
</head> </head>
<body class="text-center"> <body class="text-center">
<div class="form-signin"> <div class="form-signin">
<svg xmlns="http://www.w3.org/2000/svg" width="200" height="200" version="1.0" viewBox="0 0 1200 1056"> <svg xmlns="http://www.w3.org/2000/svg" width="200" height="200" version="1.0" viewBox="0 0 1200 1056">
<path d="M472.5 33c-125.3 3.8-199.4 13.6-264 35.1-26.9 9-56.6 22.8-73.2 34.1C84.7 136.6 50 197.3 40.4 268c-2.3 17-2.3 50.1 0 67 13.8 101.7 73 239.1 167.9 390 47.6 75.7 114.1 165.4 148.5 200.4 57.4 58.3 107.8 89.2 161.7 99.1 12.2 2.3 41.3 3.1 54.5 1.6 51.4-6 112.1-31 169-69.9 70.5-48.1 154-128.9 224.6-217.4 35.6-44.5 73.3-100.3 96.7-142.8 45.7-83 74.9-156.9 86.1-218 6-32.7 8.4-66.5 5.9-83.5-3.7-25.6-10-46.3-20.3-67-9.3-18.7-17.8-30.3-34-46.6-23.9-24.1-46.9-40-86.6-59.8-89.2-44.6-214.8-74.5-354.9-84.6-51.9-3.7-131-5.2-187-3.5zm142 45c78.7 4.4 133.6 11.5 198.5 25.6 118.2 25.6 208.7 66.2 253.6 113.7 38.7 40.8 46 101.7 22.3 184.2-15.6 54.1-48.4 122.6-93.1 194.4-15.4 24.7-39 60.1-50.1 75.1-35.6 48.1-90.2 107.6-141.5 154.2-78.4 71.4-152.7 118-210.7 132.2-20 4.9-21.5 5.1-42 5.1-22.6 0-29.3-1.2-47.2-8.5-35-14.3-87.9-57.1-132.4-107-21.7-24.4-36.7-47.5-42.9-66.4-3.9-12-2.6-20.3 3.6-22.6 11.3-4.3 33.8 3.6 73.6 25.8 52 29.1 72.5 37.1 112.9 44.6 70.9 13.1 135.2 8.7 197.4-13.4 30-10.7 64.7-29.7 92-50.5 37-28.2 73.8-68.3 101.3-110.3 7.4-11.3 23.7-42 22.8-42.9-.2-.2-2.6.7-5.3 2-15.9 8.1-47.4 13.7-76.9 13.7-41.6 0-76.1-8.2-98.4-23.3-6.7-4.6-16.7-14.4-21.5-21.2-16.7-23.5-24.4-59.3-23.2-107.9 1.7-71.3 23.6-113.8 68.6-133.2 17.9-7.8 37-10.7 70.1-10.7 26-.1 36.6 1.2 51.7 6.3 15.2 5.1 26.1 13.8 31.7 25.3 2.6 5.3 9.6 28.4 9.6 31.6 0 .9-1.3 2.9-2.8 4.4-2.5 2.4-3.8 2.8-11.8 3.3-6.8.5-10.4.2-15.4-1.1-11.5-3-23.1-4.6-39-5.2-43.5-1.7-64.5 9-76 38.8-10.4 26.9-10.4 73.7 0 95.4 8.1 17 25.3 28.1 49.3 32 10.6 1.7 36.4 2 50.7.5 16.2-1.7 56.2-8.4 57.5-9.7 1.2-1.2 5.7-27.3 7.5-44.3 3.9-36.1 1.5-83.2-6.2-120.5-15.9-77.9-57.3-150.3-113.4-198.5-24.5-21.1-45.7-35.1-74.9-49.5-45.5-22.4-85.7-34.8-132.5-40.7-16-2-64.1-1.7-80 .6-36.9 5.2-63.5 12.5-104.5 28.8C377.2 156.3 335 189.8 265.6 273c-28.5 34.2-42.5 47-61.9 56.5-13.2 6.5-22.5 8.7-37.2 8.7-18.5 0-28.6-4.1-40.2-16.6-22.1-23.6-26.6-68.3-10.8-108.1 16.9-42.5 50-72.6 102.5-92.8 60.4-23.3 167.3-40.1 281-44.1 17.5-.6 97.3.3 115.5 1.4zm-342 254c20 1.9 24 4.1 30.5 17l3 5.9V557h147.8l.7 3.1c.9 4-.2 14.8-2.1 21.9-2.5 8.7-7 16.1-14.2 23.3-8.1 8-16 12.9-26.7 16.5l-8 2.7-85.6.3-85.6.3-.7-43.3c-.3-23.8-.9-90-1.2-147.1l-.7-103.7h15.9c8.7 0 20.9.5 26.9 1zm338.1 3.5c24.2 3.9 42.1 13 57.3 29.1 8.1 8.5 14.8 19.6 17.1 28.4.7 2.5 1.2 8.4 1.1 13.2-.2 25.4-12.6 45.7-34.8 56.9l-8.2 4.2 6.5 2.3c8 2.8 16.2 7.3 23.6 13.2 17.9 14 26 37.4 23 66.2-1.9 18-6.8 27.5-22.2 43.1-9.1 9.2-11.7 11.2-19 14.7-9.1 4.4-21.3 8.1-35 10.8-7.3 1.5-18.6 1.8-82.2 2.1l-73.8.4V503.3c0-127.7-.3-121.4 6-133.1 3.7-7 13.1-18.5 18.2-22.4 7-5.3 17.6-9.9 28.8-12.4 8.6-1.9 81.8-1.8 93.6.1z" /> <path d="M472.5 33c-125.3 3.8-199.4 13.6-264 35.1-26.9 9-56.6 22.8-73.2 34.1C84.7 136.6 50 197.3 40.4 268c-2.3 17-2.3 50.1 0 67 13.8 101.7 73 239.1 167.9 390 47.6 75.7 114.1 165.4 148.5 200.4 57.4 58.3 107.8 89.2 161.7 99.1 12.2 2.3 41.3 3.1 54.5 1.6 51.4-6 112.1-31 169-69.9 70.5-48.1 154-128.9 224.6-217.4 35.6-44.5 73.3-100.3 96.7-142.8 45.7-83 74.9-156.9 86.1-218 6-32.7 8.4-66.5 5.9-83.5-3.7-25.6-10-46.3-20.3-67-9.3-18.7-17.8-30.3-34-46.6-23.9-24.1-46.9-40-86.6-59.8-89.2-44.6-214.8-74.5-354.9-84.6-51.9-3.7-131-5.2-187-3.5zm142 45c78.7 4.4 133.6 11.5 198.5 25.6 118.2 25.6 208.7 66.2 253.6 113.7 38.7 40.8 46 101.7 22.3 184.2-15.6 54.1-48.4 122.6-93.1 194.4-15.4 24.7-39 60.1-50.1 75.1-35.6 48.1-90.2 107.6-141.5 154.2-78.4 71.4-152.7 118-210.7 132.2-20 4.9-21.5 5.1-42 5.1-22.6 0-29.3-1.2-47.2-8.5-35-14.3-87.9-57.1-132.4-107-21.7-24.4-36.7-47.5-42.9-66.4-3.9-12-2.6-20.3 3.6-22.6 11.3-4.3 33.8 3.6 73.6 25.8 52 29.1 72.5 37.1 112.9 44.6 70.9 13.1 135.2 
8.7 197.4-13.4 30-10.7 64.7-29.7 92-50.5 37-28.2 73.8-68.3 101.3-110.3 7.4-11.3 23.7-42 22.8-42.9-.2-.2-2.6.7-5.3 2-15.9 8.1-47.4 13.7-76.9 13.7-41.6 0-76.1-8.2-98.4-23.3-6.7-4.6-16.7-14.4-21.5-21.2-16.7-23.5-24.4-59.3-23.2-107.9 1.7-71.3 23.6-113.8 68.6-133.2 17.9-7.8 37-10.7 70.1-10.7 26-.1 36.6 1.2 51.7 6.3 15.2 5.1 26.1 13.8 31.7 25.3 2.6 5.3 9.6 28.4 9.6 31.6 0 .9-1.3 2.9-2.8 4.4-2.5 2.4-3.8 2.8-11.8 3.3-6.8.5-10.4.2-15.4-1.1-11.5-3-23.1-4.6-39-5.2-43.5-1.7-64.5 9-76 38.8-10.4 26.9-10.4 73.7 0 95.4 8.1 17 25.3 28.1 49.3 32 10.6 1.7 36.4 2 50.7.5 16.2-1.7 56.2-8.4 57.5-9.7 1.2-1.2 5.7-27.3 7.5-44.3 3.9-36.1 1.5-83.2-6.2-120.5-15.9-77.9-57.3-150.3-113.4-198.5-24.5-21.1-45.7-35.1-74.9-49.5-45.5-22.4-85.7-34.8-132.5-40.7-16-2-64.1-1.7-80 .6-36.9 5.2-63.5 12.5-104.5 28.8C377.2 156.3 335 189.8 265.6 273c-28.5 34.2-42.5 47-61.9 56.5-13.2 6.5-22.5 8.7-37.2 8.7-18.5 0-28.6-4.1-40.2-16.6-22.1-23.6-26.6-68.3-10.8-108.1 16.9-42.5 50-72.6 102.5-92.8 60.4-23.3 167.3-40.1 281-44.1 17.5-.6 97.3.3 115.5 1.4zm-342 254c20 1.9 24 4.1 30.5 17l3 5.9V557h147.8l.7 3.1c.9 4-.2 14.8-2.1 21.9-2.5 8.7-7 16.1-14.2 23.3-8.1 8-16 12.9-26.7 16.5l-8 2.7-85.6.3-85.6.3-.7-43.3c-.3-23.8-.9-90-1.2-147.1l-.7-103.7h15.9c8.7 0 20.9.5 26.9 1zm338.1 3.5c24.2 3.9 42.1 13 57.3 29.1 8.1 8.5 14.8 19.6 17.1 28.4.7 2.5 1.2 8.4 1.1 13.2-.2 25.4-12.6 45.7-34.8 56.9l-8.2 4.2 6.5 2.3c8 2.8 16.2 7.3 23.6 13.2 17.9 14 26 37.4 23 66.2-1.9 18-6.8 27.5-22.2 43.1-9.1 9.2-11.7 11.2-19 14.7-9.1 4.4-21.3 8.1-35 10.8-7.3 1.5-18.6 1.8-82.2 2.1l-73.8.4V503.3c0-127.7-.3-121.4 6-133.1 3.7-7 13.1-18.5 18.2-22.4 7-5.3 17.6-9.9 28.8-12.4 8.6-1.9 81.8-1.8 93.6.1z" />
<path d="M537.4 393.8c-2.9 1.9-3.4 5.5-3.4 26.6V442h30.3c34 0 34.6-.2 42.1-7.6 9.9-9.9 10.2-22.9.8-32.8-2-2.1-5.9-4.9-8.7-6.2-4.9-2.3-5.6-2.4-32.5-2.4-15.1 0-28 .4-28.6.8zM534 530.1v32l32.3-.3c36-.5 38-.8 47.7-7.5 3.1-2.2 6.7-5.9 8.5-8.7 2.8-4.7 3-5.5 3-15.5 0-9.9-.2-11-3-16-4-7.2-9.5-11.7-17.3-14.1-5.8-1.8-9.4-2-38.8-2H534v32.1z" /> <path d="M537.4 393.8c-2.9 1.9-3.4 5.5-3.4 26.6V442h30.3c34 0 34.6-.2 42.1-7.6 9.9-9.9 10.2-22.9.8-32.8-2-2.1-5.9-4.9-8.7-6.2-4.9-2.3-5.6-2.4-32.5-2.4-15.1 0-28 .4-28.6.8zM534 530.1v32l32.3-.3c36-.5 38-.8 47.7-7.5 3.1-2.2 6.7-5.9 8.5-8.7 2.8-4.7 3-5.5 3-15.5 0-9.9-.2-11-3-16-4-7.2-9.5-11.7-17.3-14.1-5.8-1.8-9.4-2-38.8-2H534v32.1z" />
</svg> </svg>
<p>{% translate "You have been successfully logged out. Bye!" %}</p> <p>{% translate "You have been successfully logged out. Bye!" %}</p>
<a href="{% url 'base' %}">{% translate "Sign in again" %}</a> <a href="{% url 'base' %}">{% translate "Sign in again" %}</a>
</div> </div>
</body> </body>
</html> </html>

View File

@ -1,57 +1,57 @@
<!DOCTYPE html> <!DOCTYPE html>
{% load static %} {% load i18n %} {% load static %} {% load i18n %}
<html lang="en"> <html lang="en">
<head> <head>
<meta charset="utf-8" /> <meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" /> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" />
<meta name="description" content="" /> <meta name="description" content="" />
<meta name="author" content="Mark Otto, Jacob Thornton, and Bootstrap contributors" /> <meta name="author" content="Mark Otto, Jacob Thornton, and Bootstrap contributors" />
<meta name="generator" content="Jekyll v4.1.1" /> <meta name="generator" content="Jekyll v4.1.1" />
<meta name="robots" content="noindex,nofollow" /> <meta name="robots" content="noindex,nofollow" />
<title>{% translate "LBC Finance sign in" %}</title> <title>{% translate "LBC Finance sign in" %}</title>
<!-- Bootstrap core CSS --> <!-- Bootstrap core CSS -->
<link href="{% static 'bootstrap.min.css' %}" rel="stylesheet" /> <link href="{% static 'bootstrap.min.css' %}" rel="stylesheet" />
<style> <style>
.bd-placeholder-img { .bd-placeholder-img {
font-size: 1.125rem; font-size: 1.125rem;
text-anchor: middle; text-anchor: middle;
-webkit-user-select: none; -webkit-user-select: none;
-moz-user-select: none; -moz-user-select: none;
-ms-user-select: none; -ms-user-select: none;
user-select: none; user-select: none;
} }
@media (min-width: 768px) { @media (min-width: 768px) {
.bd-placeholder-img-lg { .bd-placeholder-img-lg {
font-size: 3.5rem; font-size: 3.5rem;
} }
} }
</style> </style>
<!-- Custom styles for this template --> <!-- Custom styles for this template -->
<link href="{% static 'signin.css' %}" rel="stylesheet" /> <link href="{% static 'signin.css' %}" rel="stylesheet" />
</head> </head>
<body class="text-center"> <body class="text-center">
<form class="form-signin" method="post"> <form class="form-signin" method="post">
{% csrf_token %} {% csrf_token %}
<svg xmlns="http://www.w3.org/2000/svg" width="200" height="200" version="1.0" viewBox="0 0 1200 1056"> <svg xmlns="http://www.w3.org/2000/svg" width="200" height="200" version="1.0" viewBox="0 0 1200 1056">
<path d="M472.5 33c-125.3 3.8-199.4 13.6-264 35.1-26.9 9-56.6 22.8-73.2 34.1C84.7 136.6 50 197.3 40.4 268c-2.3 17-2.3 50.1 0 67 13.8 101.7 73 239.1 167.9 390 47.6 75.7 114.1 165.4 148.5 200.4 57.4 58.3 107.8 89.2 161.7 99.1 12.2 2.3 41.3 3.1 54.5 1.6 51.4-6 112.1-31 169-69.9 70.5-48.1 154-128.9 224.6-217.4 35.6-44.5 73.3-100.3 96.7-142.8 45.7-83 74.9-156.9 86.1-218 6-32.7 8.4-66.5 5.9-83.5-3.7-25.6-10-46.3-20.3-67-9.3-18.7-17.8-30.3-34-46.6-23.9-24.1-46.9-40-86.6-59.8-89.2-44.6-214.8-74.5-354.9-84.6-51.9-3.7-131-5.2-187-3.5zm142 45c78.7 4.4 133.6 11.5 198.5 25.6 118.2 25.6 208.7 66.2 253.6 113.7 38.7 40.8 46 101.7 22.3 184.2-15.6 54.1-48.4 122.6-93.1 194.4-15.4 24.7-39 60.1-50.1 75.1-35.6 48.1-90.2 107.6-141.5 154.2-78.4 71.4-152.7 118-210.7 132.2-20 4.9-21.5 5.1-42 5.1-22.6 0-29.3-1.2-47.2-8.5-35-14.3-87.9-57.1-132.4-107-21.7-24.4-36.7-47.5-42.9-66.4-3.9-12-2.6-20.3 3.6-22.6 11.3-4.3 33.8 3.6 73.6 25.8 52 29.1 72.5 37.1 112.9 44.6 70.9 13.1 135.2 8.7 197.4-13.4 30-10.7 64.7-29.7 92-50.5 37-28.2 73.8-68.3 101.3-110.3 7.4-11.3 23.7-42 22.8-42.9-.2-.2-2.6.7-5.3 2-15.9 8.1-47.4 13.7-76.9 13.7-41.6 0-76.1-8.2-98.4-23.3-6.7-4.6-16.7-14.4-21.5-21.2-16.7-23.5-24.4-59.3-23.2-107.9 1.7-71.3 23.6-113.8 68.6-133.2 17.9-7.8 37-10.7 70.1-10.7 26-.1 36.6 1.2 51.7 6.3 15.2 5.1 26.1 13.8 31.7 25.3 2.6 5.3 9.6 28.4 9.6 31.6 0 .9-1.3 2.9-2.8 4.4-2.5 2.4-3.8 2.8-11.8 3.3-6.8.5-10.4.2-15.4-1.1-11.5-3-23.1-4.6-39-5.2-43.5-1.7-64.5 9-76 38.8-10.4 26.9-10.4 73.7 0 95.4 8.1 17 25.3 28.1 49.3 32 10.6 1.7 36.4 2 50.7.5 16.2-1.7 56.2-8.4 57.5-9.7 1.2-1.2 5.7-27.3 7.5-44.3 3.9-36.1 1.5-83.2-6.2-120.5-15.9-77.9-57.3-150.3-113.4-198.5-24.5-21.1-45.7-35.1-74.9-49.5-45.5-22.4-85.7-34.8-132.5-40.7-16-2-64.1-1.7-80 .6-36.9 5.2-63.5 12.5-104.5 28.8C377.2 156.3 335 189.8 265.6 273c-28.5 34.2-42.5 47-61.9 56.5-13.2 6.5-22.5 8.7-37.2 8.7-18.5 0-28.6-4.1-40.2-16.6-22.1-23.6-26.6-68.3-10.8-108.1 16.9-42.5 50-72.6 102.5-92.8 60.4-23.3 167.3-40.1 281-44.1 17.5-.6 97.3.3 115.5 1.4zm-342 254c20 1.9 24 4.1 30.5 17l3 5.9V557h147.8l.7 3.1c.9 4-.2 14.8-2.1 21.9-2.5 8.7-7 16.1-14.2 23.3-8.1 8-16 12.9-26.7 16.5l-8 2.7-85.6.3-85.6.3-.7-43.3c-.3-23.8-.9-90-1.2-147.1l-.7-103.7h15.9c8.7 0 20.9.5 26.9 1zm338.1 3.5c24.2 3.9 42.1 13 57.3 29.1 8.1 8.5 14.8 19.6 17.1 28.4.7 2.5 1.2 8.4 1.1 13.2-.2 25.4-12.6 45.7-34.8 56.9l-8.2 4.2 6.5 2.3c8 2.8 16.2 7.3 23.6 13.2 17.9 14 26 37.4 23 66.2-1.9 18-6.8 27.5-22.2 43.1-9.1 9.2-11.7 11.2-19 14.7-9.1 4.4-21.3 8.1-35 10.8-7.3 1.5-18.6 1.8-82.2 2.1l-73.8.4V503.3c0-127.7-.3-121.4 6-133.1 3.7-7 13.1-18.5 18.2-22.4 7-5.3 17.6-9.9 28.8-12.4 8.6-1.9 81.8-1.8 93.6.1z" /> <path d="M472.5 33c-125.3 3.8-199.4 13.6-264 35.1-26.9 9-56.6 22.8-73.2 34.1C84.7 136.6 50 197.3 40.4 268c-2.3 17-2.3 50.1 0 67 13.8 101.7 73 239.1 167.9 390 47.6 75.7 114.1 165.4 148.5 200.4 57.4 58.3 107.8 89.2 161.7 99.1 12.2 2.3 41.3 3.1 54.5 1.6 51.4-6 112.1-31 169-69.9 70.5-48.1 154-128.9 224.6-217.4 35.6-44.5 73.3-100.3 96.7-142.8 45.7-83 74.9-156.9 86.1-218 6-32.7 8.4-66.5 5.9-83.5-3.7-25.6-10-46.3-20.3-67-9.3-18.7-17.8-30.3-34-46.6-23.9-24.1-46.9-40-86.6-59.8-89.2-44.6-214.8-74.5-354.9-84.6-51.9-3.7-131-5.2-187-3.5zm142 45c78.7 4.4 133.6 11.5 198.5 25.6 118.2 25.6 208.7 66.2 253.6 113.7 38.7 40.8 46 101.7 22.3 184.2-15.6 54.1-48.4 122.6-93.1 194.4-15.4 24.7-39 60.1-50.1 75.1-35.6 48.1-90.2 107.6-141.5 154.2-78.4 71.4-152.7 118-210.7 132.2-20 4.9-21.5 5.1-42 5.1-22.6 0-29.3-1.2-47.2-8.5-35-14.3-87.9-57.1-132.4-107-21.7-24.4-36.7-47.5-42.9-66.4-3.9-12-2.6-20.3 3.6-22.6 11.3-4.3 33.8 3.6 73.6 25.8 52 29.1 72.5 37.1 112.9 44.6 70.9 13.1 135.2 
8.7 197.4-13.4 30-10.7 64.7-29.7 92-50.5 37-28.2 73.8-68.3 101.3-110.3 7.4-11.3 23.7-42 22.8-42.9-.2-.2-2.6.7-5.3 2-15.9 8.1-47.4 13.7-76.9 13.7-41.6 0-76.1-8.2-98.4-23.3-6.7-4.6-16.7-14.4-21.5-21.2-16.7-23.5-24.4-59.3-23.2-107.9 1.7-71.3 23.6-113.8 68.6-133.2 17.9-7.8 37-10.7 70.1-10.7 26-.1 36.6 1.2 51.7 6.3 15.2 5.1 26.1 13.8 31.7 25.3 2.6 5.3 9.6 28.4 9.6 31.6 0 .9-1.3 2.9-2.8 4.4-2.5 2.4-3.8 2.8-11.8 3.3-6.8.5-10.4.2-15.4-1.1-11.5-3-23.1-4.6-39-5.2-43.5-1.7-64.5 9-76 38.8-10.4 26.9-10.4 73.7 0 95.4 8.1 17 25.3 28.1 49.3 32 10.6 1.7 36.4 2 50.7.5 16.2-1.7 56.2-8.4 57.5-9.7 1.2-1.2 5.7-27.3 7.5-44.3 3.9-36.1 1.5-83.2-6.2-120.5-15.9-77.9-57.3-150.3-113.4-198.5-24.5-21.1-45.7-35.1-74.9-49.5-45.5-22.4-85.7-34.8-132.5-40.7-16-2-64.1-1.7-80 .6-36.9 5.2-63.5 12.5-104.5 28.8C377.2 156.3 335 189.8 265.6 273c-28.5 34.2-42.5 47-61.9 56.5-13.2 6.5-22.5 8.7-37.2 8.7-18.5 0-28.6-4.1-40.2-16.6-22.1-23.6-26.6-68.3-10.8-108.1 16.9-42.5 50-72.6 102.5-92.8 60.4-23.3 167.3-40.1 281-44.1 17.5-.6 97.3.3 115.5 1.4zm-342 254c20 1.9 24 4.1 30.5 17l3 5.9V557h147.8l.7 3.1c.9 4-.2 14.8-2.1 21.9-2.5 8.7-7 16.1-14.2 23.3-8.1 8-16 12.9-26.7 16.5l-8 2.7-85.6.3-85.6.3-.7-43.3c-.3-23.8-.9-90-1.2-147.1l-.7-103.7h15.9c8.7 0 20.9.5 26.9 1zm338.1 3.5c24.2 3.9 42.1 13 57.3 29.1 8.1 8.5 14.8 19.6 17.1 28.4.7 2.5 1.2 8.4 1.1 13.2-.2 25.4-12.6 45.7-34.8 56.9l-8.2 4.2 6.5 2.3c8 2.8 16.2 7.3 23.6 13.2 17.9 14 26 37.4 23 66.2-1.9 18-6.8 27.5-22.2 43.1-9.1 9.2-11.7 11.2-19 14.7-9.1 4.4-21.3 8.1-35 10.8-7.3 1.5-18.6 1.8-82.2 2.1l-73.8.4V503.3c0-127.7-.3-121.4 6-133.1 3.7-7 13.1-18.5 18.2-22.4 7-5.3 17.6-9.9 28.8-12.4 8.6-1.9 81.8-1.8 93.6.1z" />
<path d="M537.4 393.8c-2.9 1.9-3.4 5.5-3.4 26.6V442h30.3c34 0 34.6-.2 42.1-7.6 9.9-9.9 10.2-22.9.8-32.8-2-2.1-5.9-4.9-8.7-6.2-4.9-2.3-5.6-2.4-32.5-2.4-15.1 0-28 .4-28.6.8zM534 530.1v32l32.3-.3c36-.5 38-.8 47.7-7.5 3.1-2.2 6.7-5.9 8.5-8.7 2.8-4.7 3-5.5 3-15.5 0-9.9-.2-11-3-16-4-7.2-9.5-11.7-17.3-14.1-5.8-1.8-9.4-2-38.8-2H534v32.1z" /> <path d="M537.4 393.8c-2.9 1.9-3.4 5.5-3.4 26.6V442h30.3c34 0 34.6-.2 42.1-7.6 9.9-9.9 10.2-22.9.8-32.8-2-2.1-5.9-4.9-8.7-6.2-4.9-2.3-5.6-2.4-32.5-2.4-15.1 0-28 .4-28.6.8zM534 530.1v32l32.3-.3c36-.5 38-.8 47.7-7.5 3.1-2.2 6.7-5.9 8.5-8.7 2.8-4.7 3-5.5 3-15.5 0-9.9-.2-11-3-16-4-7.2-9.5-11.7-17.3-14.1-5.8-1.8-9.4-2-38.8-2H534v32.1z" />
</svg> </svg>
<p>{% translate "Sign in to LBC Finance" %}</p> <p>{% translate "Sign in to LBC Finance" %}</p>
{% if form.errors %} {% if form.errors %}
<div class="alert alert-danger" role="alert">{% translate "Your username and password didn't match. Please try again." %}</div> <div class="alert alert-danger" role="alert">{% translate "Your username and password didn't match. Please try again." %}</div>
{% endif %} {% translate "Username" as i18n_username %} {% translate "Password" as i18n_password %} {% endif %} {% translate "Username" as i18n_username %} {% translate "Password" as i18n_password %}
<label for="inputUsername" class="sr-only">{{ i18n_username }}</label> <label for="inputUsername" class="sr-only">{{ i18n_username }}</label>
<input type="text" name="username" id="inputUsername" class="form-control" placeholder="{{ i18n_username }}" autocorrect="off" autocapitalize="none" required autofocus /> <input type="text" name="username" id="inputUsername" class="form-control" placeholder="{{ i18n_username }}" autocorrect="off" autocapitalize="none" required autofocus />
<label for="inputPassword" class="sr-only">{{ i18n_password }}</label> <label for="inputPassword" class="sr-only">{{ i18n_password }}</label>
<input type="password" name="password" id="inputPassword" class="form-control" placeholder="{{ i18n_password }}" required /> <input type="password" name="password" id="inputPassword" class="form-control" placeholder="{{ i18n_password }}" required />
<button class="btn btn-lg btn-primary btn-block" type="submit">{% translate "Sign in" %}</button> <button class="btn btn-lg btn-primary btn-block" type="submit">{% translate "Sign in" %}</button>
</form> </form>
</body> </body>
</html> </html>

File diff suppressed because it is too large