2021-06-15 19:09:21 +02:00
|
|
|
import json
|
2021-07-10 01:34:38 +02:00
|
|
|
from dataclasses import dataclass, field
|
|
|
|
|
from dataclasses import fields as _fields
|
2021-06-15 19:09:21 +02:00
|
|
|
from datetime import datetime, timezone
|
2021-08-18 20:08:09 +02:00
|
|
|
from functools import partial
|
2023-02-04 12:46:30 +01:00
|
|
|
from types import UnionType
|
2021-07-10 01:34:38 +02:00
|
|
|
from typing import (
|
|
|
|
|
Annotated,
|
|
|
|
|
Any,
|
|
|
|
|
ClassVar,
|
2021-12-19 19:25:31 +01:00
|
|
|
Container,
|
2021-08-03 17:05:25 +02:00
|
|
|
Literal,
|
2023-03-18 00:29:29 +01:00
|
|
|
Mapping,
|
2023-03-28 23:32:24 +02:00
|
|
|
Protocol,
|
2021-07-10 01:34:38 +02:00
|
|
|
Type,
|
2023-03-18 23:30:40 +01:00
|
|
|
TypedDict,
|
2023-03-28 21:50:14 +02:00
|
|
|
TypeVar,
|
2021-07-10 01:34:38 +02:00
|
|
|
Union,
|
|
|
|
|
get_args,
|
|
|
|
|
get_origin,
|
|
|
|
|
)
|
2021-06-15 19:09:21 +02:00
|
|
|
|
2023-03-18 23:51:40 +01:00
|
|
|
from sqlalchemy import Column, ForeignKey, Integer, String, Table
|
|
|
|
|
from sqlalchemy.orm import registry
|
|
|
|
|
|
2021-06-15 19:09:21 +02:00
|
|
|
from .types import ULID
|
|
|
|
|
|
2023-02-04 17:30:54 +01:00
|
|
|
# Recursive alias for any JSON-compatible value.
JSON = int | float | str | None | list["JSON"] | dict[str, "JSON"]
# A JSON object: string keys mapping to JSON values.
JSONObject = dict[str, JSON]
|
|
|
|
|
|
2021-08-03 16:39:36 +02:00
|
|
|
# Generic type variable used by `Relation`, `_id`, and `fromplain`.
T = TypeVar("T")
|
|
|
|
|
|
2023-03-28 23:32:24 +02:00
|
|
|
|
|
|
|
|
class Model(Protocol):
    """Structural type for mapped models: anything carrying a `__table__`."""

    __table__: ClassVar[Table]
|
|
|
|
|
|
|
|
|
|
|
2023-03-18 23:51:40 +01:00
|
|
|
# Single SQLAlchemy registry (and its metadata) shared by all mapped models below.
mapper_registry = registry()
metadata = mapper_registry.metadata
|
2023-03-18 23:51:40 +01:00
|
|
|
|
2021-06-15 19:09:21 +02:00
|
|
|
|
2023-02-02 23:46:02 +01:00
|
|
|
def annotations(tp: Type) -> tuple | None:
|
2021-07-10 01:34:38 +02:00
|
|
|
return tp.__metadata__ if hasattr(tp, "__metadata__") else None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def fields(class_or_instance):
    """Drop-in replacement for dataclasses' `fields`, tailored to our models.

    Use this everywhere instead of the stock `fields`. It skips the internal
    `_is_lazy` bookkeeping field and every field annotated as a `Relation`
    (fields meant to hold the data referenced by a sibling ID field directly
    on the instance).
    """
    # XXX this might be a little slow (not sure), if so, memoize
    for fld in _fields(class_or_instance):
        if fld.name == "_is_lazy":
            continue
        meta = annotations(fld.type)
        if meta is not None and _RelationSentinel in meta:
            continue  # Relations are ignored
        yield fld
|
|
|
|
|
|
|
|
|
|
|
2021-12-19 19:25:31 +01:00
|
|
|
def is_optional(tp: Type) -> bool:
|
|
|
|
|
"""Return wether the given type is optional."""
|
2023-02-04 12:46:30 +01:00
|
|
|
if not isinstance(tp, UnionType) and get_origin(tp) is not Union:
|
2021-06-15 19:09:21 +02:00
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
args = get_args(tp)
|
|
|
|
|
return len(args) == 2 and type(None) in args
|
|
|
|
|
|
|
|
|
|
|
2023-02-02 23:46:02 +01:00
|
|
|
def optional_type(tp: Type) -> Type | None:
|
2021-12-19 19:25:31 +01:00
|
|
|
"""Return the wrapped type from an optional type.
|
|
|
|
|
|
|
|
|
|
For example this will return `int` for `Optional[int]`.
|
|
|
|
|
Since they're equivalent this also works for other optioning notations, like
|
|
|
|
|
`Union[int, None]` and `int | None`.
|
|
|
|
|
"""
|
2023-02-04 12:46:30 +01:00
|
|
|
if not isinstance(tp, UnionType) and get_origin(tp) is not Union:
|
2021-06-15 19:09:21 +02:00
|
|
|
return None
|
|
|
|
|
|
|
|
|
|
args = get_args(tp)
|
2021-12-19 19:25:31 +01:00
|
|
|
if len(args) != 2 or type(None) not in args:
|
2021-06-15 19:09:21 +02:00
|
|
|
return None
|
|
|
|
|
|
2021-12-19 19:25:31 +01:00
|
|
|
return args[0] if args[1] is type(None) else args[1]
|
2021-06-15 19:09:21 +02:00
|
|
|
|
|
|
|
|
|
2021-06-21 18:54:03 +02:00
|
|
|
def optional_fields(o):
    """Yield only the fields of *o* whose declared type is optional."""
    yield from (f for f in fields(o) if is_optional(f.type))
|
|
|
|
|
|
|
|
|
|
|
2021-08-18 20:08:09 +02:00
|
|
|
# Compact JSON encoder (no whitespace) used when serializing collection fields.
json_dump = partial(json.dumps, separators=(",", ":"))
|
|
|
|
|
|
|
|
|
|
|
2021-12-19 19:25:31 +01:00
|
|
|
def _id(x: T) -> T:
    """Identity function; stands in when no (de)serialization step is wanted."""
    return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def asplain(
    o: object, *, filter_fields: Container[str] | None = None, serialize: bool = False
) -> dict[str, Any]:
    """Return the given model instance as `dict` with JSON compatible plain datatypes.

    If `filter_fields` is given only matching field names will be included in
    the resulting `dict`.
    If `serialize` is `True`, collection types (lists, dicts, etc.) will be
    serialized as strings; this can be useful to store them in a database. Be
    sure to set `serialized=True` when using `fromplain` to successfully restore
    the object.

    Raises `ValueError` if a field value doesn't match its declared type (via
    `validate`) or if a field's type isn't one of the supported kinds.
    """
    validate(o)

    # With serialize=False collections pass through unchanged.
    dump = json_dump if serialize else _id

    d: JSONObject = {}
    for f in fields(o):
        if filter_fields is not None and f.name not in filter_fields:
            continue

        # Reduce the declared type to a bare class to dispatch on: first
        # unwrap Optional[X] -> X, then a generic's origin (set[str] -> set).
        target = f.type
        # XXX this doesn't properly support any kind of nested types
        if (otype := optional_type(f.type)) is not None:
            target = otype
        if (otype := get_origin(target)) is not None:
            target = otype

        v = getattr(o, f.name)
        if is_optional(f.type) and v is None:
            d[f.name] = None
        elif target is ULID:
            assert isinstance(v, ULID)
            d[f.name] = str(v)
        elif target in {datetime}:
            assert isinstance(v, datetime)
            d[f.name] = v.isoformat()
        elif target in {set}:
            assert isinstance(v, set)
            # Sorted for a deterministic representation of the unordered set.
            d[f.name] = dump(list(sorted(v)))
        elif target in {list}:
            assert isinstance(v, list)
            d[f.name] = dump(list(v))
        elif target in {bool, str, int, float}:
            # Plain scalars are already JSON compatible; stored as-is.
            assert isinstance(
                v, target
            ), f"Type mismatch: {f.name} ({target} != {type(v)})"
            d[f.name] = v
        else:
            raise ValueError(f"Unsupported value type: {f.name}: {type(v)}")

    return d
|
|
|
|
|
|
|
|
|
|
|
2023-03-18 00:29:29 +01:00
|
|
|
def fromplain(cls: Type[T], d: Mapping, *, serialized: bool = False) -> T:
    """Return an instance of the given model using the given data.

    If `serialized` is `True`, collection types (lists, dicts, etc.) will be
    deserialized from string. This is the opposite operation of `serialize` for
    `asplain`.

    Raises `KeyError` if `d` lacks a field's name, and `ValueError` if the
    resulting instance fails `validate`.
    """
    # With serialized=False collection values pass through unchanged.
    load = json.loads if serialized else _id

    dd: JSONObject = {}
    for f in fields(cls):
        # Reduce the declared type to a bare constructor class: unwrap
        # Optional[X] -> X, then a generic's origin (set[str] -> set).
        target = f.type
        otype = optional_type(f.type)
        is_opt = otype is not None
        if is_opt:
            target = otype
        if (xtype := get_origin(target)) is not None:
            target = xtype

        v = d[f.name]
        if is_opt and v is None:
            dd[f.name] = v
        elif isinstance(v, target):
            # Already the right type (e.g. plain scalars) — take as-is.
            dd[f.name] = v
        elif target in {set, list}:
            dd[f.name] = target(load(v))
        elif target in {datetime}:
            dd[f.name] = target.fromisoformat(v)
        else:
            # Fallback: let the target type coerce the value (e.g. ULID(str)).
            dd[f.name] = target(v)

    o = cls(**dd)
    validate(o)
    return o
|
|
|
|
|
|
|
|
|
|
|
2021-12-19 19:25:31 +01:00
|
|
|
def validate(o: object) -> None:
    """Raise `ValueError` if any field value doesn't match its declared type.

    A value matches when its type is exactly the declared type, the origin of
    a declared generic (e.g. a `set` for `set[str]`), or a member of a
    declared union (e.g. `None` for `str | None`). Relation fields are not
    checked (they're excluded by `fields`).
    """
    for f in fields(o):
        declared = f.type
        actual = type(getattr(o, f.name))
        if actual is declared:
            continue
        if get_origin(declared) is actual:
            continue
        in_union = (
            isinstance(declared, UnionType) or get_origin(declared) is Union
        ) and actual in get_args(declared)
        if in_union:
            continue
        raise ValueError(f"Invalid value type: {f.name}: {actual}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def utcnow() -> datetime:
    """Return the current time as a timezone-aware UTC `datetime`.

    Uses `datetime.now(timezone.utc)` directly: `datetime.utcnow()` is
    deprecated (since Python 3.12) and returns a naive datetime that had to
    be patched up with `.replace(tzinfo=...)`; the result is identical.
    """
    return datetime.now(timezone.utc)
|
2021-06-15 19:09:21 +02:00
|
|
|
|
|
|
|
|
|
2023-03-28 23:03:35 +02:00
|
|
|
@mapper_registry.mapped
@dataclass
class DbPatch:
    """Model for the `db_patches` table; `current` records the applied patch."""

    __table__: ClassVar[Table] = Table(
        "db_patches",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("current", String),
    )

    id: int
    current: str
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Convenience alias for the `db_patches` table object.
db_patches = DbPatch.__table__
|
|
|
|
|
|
|
|
|
|
|
2023-03-18 23:51:40 +01:00
|
|
|
@mapper_registry.mapped
@dataclass
class Progress:
    """Progress record with a JSON-encoded `state` payload (percent/error).

    `percent` and `error` are convenience accessors that read/write keys
    inside the JSON `state` column.
    """

    __table__: ClassVar[Table] = Table(
        "progress",
        metadata,
        Column("id", String, primary_key=True),  # ULID
        Column("type", String, nullable=False),
        Column("state", String, nullable=False),  # JSON {"percent": ..., "error": ...}
        Column("started", String, nullable=False),  # datetime
        Column("stopped", String),
    )

    id: ULID = field(default_factory=ULID)
    type: str = None
    state: str = None
    started: datetime = field(default_factory=utcnow)
    # NOTE(review): stored as a string; presumably an ISO datetime like
    # `started` — confirm against writers of this field.
    stopped: str | None = None

    @property
    def _state(self) -> dict:
        # Decode the JSON `state` column; an empty/None state yields {}.
        return json.loads(self.state or "{}")

    @_state.setter
    def _state(self, state: dict):
        self.state = json_dump(state)

    @property
    def percent(self) -> float:
        return self._state["percent"]

    @percent.setter
    def percent(self, percent: float):
        # Read-modify-write of the whole dict: `_state` returns a fresh
        # decode, so mutating it in place would not persist.
        state = self._state
        state["percent"] = percent
        self._state = state

    @property
    def error(self) -> str:
        return self._state.get("error", "")

    @error.setter
    def error(self, error: str):
        state = self._state
        state["error"] = error
        self._state = state
|
|
|
|
|
|
2021-07-22 00:05:02 +02:00
|
|
|
|
2023-03-18 23:51:40 +01:00
|
|
|
@mapper_registry.mapped
@dataclass
class Movie:
    """Movie metadata record (title, release year, IMDb info, genres)."""

    __table__: ClassVar[Table] = Table(
        "movies",
        metadata,
        Column("id", String, primary_key=True),  # ULID
        Column("title", String, nullable=False),
        Column("original_title", String),
        Column("release_year", Integer, nullable=False),
        Column("media_type", String, nullable=False),
        Column("imdb_id", String, nullable=False, unique=True),
        Column("imdb_score", Integer),
        Column("imdb_votes", Integer),
        Column("runtime", Integer),
        Column("genres", String, nullable=False),
        Column("created", String, nullable=False),  # datetime
        Column("updated", String, nullable=False),  # datetime
    )

    id: ULID = field(default_factory=ULID)
    title: str = None  # canonical title (usually English)
    original_title: str | None = (
        None  # original title (usually transcribed to Latin script)
    )
    release_year: int = None  # canonical release date
    media_type: str = None
    imdb_id: str = None
    imdb_score: int | None = None  # range: [0,100]
    imdb_votes: int | None = None
    runtime: int | None = None  # minutes
    genres: set[str] = None
    created: datetime = field(default_factory=utcnow)
    updated: datetime = field(default_factory=utcnow)

    # Internal flag set by `lazy()`; excluded from init/repr/eq and skipped
    # by this module's `fields` helper.
    _is_lazy: bool = field(default=False, init=False, repr=False, compare=False)

    @classmethod
    def lazy(cls, **kwds):
        """Return a new instance without running default factories.

        This is meant purely for optimization purposes, to postpone possibly
        expensive initialization operations.
        """
        # XXX optimize using a metaclass & storing field refs on the class
        # Fields with default factories are forced to None so the factories
        # don't run; `_lazy_init` fills them in later.
        kwds.setdefault("id", None)
        kwds.setdefault("created", None)
        kwds.setdefault("updated", None)
        movie = cls(**kwds)
        movie._is_lazy = True
        return movie

    def _lazy_init(self):
        """Run the default factories postponed by `lazy()`, once."""
        if not self._is_lazy:
            return

        # NOTE: the loop variable shadows the imported dataclasses `field`.
        for field in fields(Movie):
            if getattr(self, field.name) is None and callable(field.default_factory):
                setattr(self, field.name, field.default_factory())

        self._is_lazy = False
|
|
|
|
|
|
2021-06-15 19:09:21 +02:00
|
|
|
|
2023-03-28 21:50:14 +02:00
|
|
|
# Convenience alias for the `movies` table object.
movies = Movie.__table__
|
|
|
|
|
|
2021-07-10 01:34:38 +02:00
|
|
|
# Unique sentinel stored in `Annotated` metadata to tag Relation fields;
# `fields()` checks for it by identity.
_RelationSentinel = object()

"""Mark a model field as containing external data.

For each field marked as a Relation there should be another field on the
dataclass containing the ID of the linked data.
The contents of the Relation are ignored or discarded when using
`asplain`, `fromplain`, and `validate`.
"""
Relation = Annotated[T | None, _RelationSentinel]
|
2021-07-10 01:34:38 +02:00
|
|
|
|
|
|
|
|
|
2023-03-18 23:51:40 +01:00
|
|
|
@mapper_registry.mapped
@dataclass
class Rating:
    """A user's rating of a movie, linking `movie_id` and `user_id`."""

    __table__: ClassVar[Table] = Table(
        "ratings",
        metadata,
        Column("id", String, primary_key=True),  # ULID
        Column("movie_id", ForeignKey("movies.id"), nullable=False),  # ULID
        Column("user_id", ForeignKey("users.id"), nullable=False),  # ULID
        Column("score", Integer, nullable=False),
        Column("rating_date", String, nullable=False),  # datetime
        Column("favorite", Integer),  # bool
        Column("finished", Integer),  # bool
    )

    id: ULID = field(default_factory=ULID)

    movie_id: ULID = None
    # Optional in-memory copy of the referenced Movie; not persisted.
    movie: Relation[Movie] = None

    user_id: ULID = None
    # Optional in-memory copy of the referencing User; not persisted.
    user: Relation["User"] = None

    score: int = None  # range: [0,100]
    rating_date: datetime = None
    favorite: bool | None = None
    finished: bool | None = None

    # NOTE(review): defined in the class body so the dataclass machinery
    # keeps it instead of generating its own — confirm this is intentional.
    def __eq__(self, other):
        """Return whether two Ratings are equal.

        This operation compares all fields as expected, except that it
        ignores any field marked as Relation.
        """
        if type(other) is not type(self):
            return False
        # `fields` (module helper) already excludes Relation fields.
        return all(
            getattr(self, f.name) == getattr(other, f.name) for f in fields(self)
        )
|
|
|
|
|
|
2021-06-15 19:09:21 +02:00
|
|
|
|
2023-03-28 21:50:14 +02:00
|
|
|
# Convenience alias for the `ratings` table object.
ratings = Rating.__table__
|
|
|
|
|
|
2021-08-05 15:53:27 +02:00
|
|
|
# Access level of a user's group membership (see `User.has_access`).
Access = Literal[
    "r",  # read
    "i",  # index
    "w",  # write
]
|
|
|
|
|
|
|
|
|
|
|
2023-03-18 23:30:40 +01:00
|
|
|
class UserGroup(TypedDict):
    """A single group membership entry as stored in `User.groups`."""

    id: str  # group id (stringified ULID)
    access: Access
|
|
|
|
|
|
|
|
|
|
|
2023-03-18 23:51:40 +01:00
|
|
|
@mapper_registry.mapped
@dataclass
class User:
    """A user account with its group memberships (`groups`, JSON in the DB)."""

    __table__: ClassVar[Table] = Table(
        "users",
        metadata,
        Column("id", String, primary_key=True),  # ULID
        Column("imdb_id", String, nullable=False, unique=True),
        Column("name", String, nullable=False),
        Column("secret", String, nullable=False),
        Column("groups", String, nullable=False),  # JSON array
    )

    id: ULID = field(default_factory=ULID)
    imdb_id: str = None
    name: str = None  # canonical user name
    secret: str = None
    groups: list[UserGroup] = field(default_factory=list)

    def has_access(self, group_id: ULID | str, access: Access = "r") -> bool:
        """Return whether this user has exactly `access` on the given group."""
        # Normalize ULIDs to their string form, matching UserGroup["id"].
        group_id = group_id if isinstance(group_id, str) else str(group_id)
        return any(g["id"] == group_id and access == g["access"] for g in self.groups)

    def set_access(self, group_id: ULID | str, access: Access) -> None:
        """Set this user's access for the group, adding a membership if new."""
        group_id = group_id if isinstance(group_id, str) else str(group_id)
        for g in self.groups:
            if g["id"] == group_id:
                g["access"] = access
                break
        else:
            # for/else: no existing membership matched -> append a new one.
            self.groups.append({"id": group_id, "access": access})
|
2021-07-08 09:48:54 +02:00
|
|
|
|
|
|
|
|
|
2023-03-18 23:30:40 +01:00
|
|
|
class GroupUser(TypedDict):
    """A single member entry as stored in `Group.users`."""

    id: str  # user id (stringified ULID)
    name: str
|
|
|
|
|
|
|
|
|
|
|
2023-03-18 23:51:40 +01:00
|
|
|
@mapper_registry.mapped
@dataclass
class Group:
    """A named group of users; membership is stored as JSON in `users`."""

    __table__: ClassVar[Table] = Table(
        "groups",
        metadata,
        Column("id", String, primary_key=True),  # ULID
        Column("name", String, nullable=False),
        Column("users", String, nullable=False),  # JSON array
    )

    id: ULID = field(default_factory=ULID)
    name: str = None
    users: list[GroupUser] = field(default_factory=list)
|