Convert dataclass of dataclass to json string - python

I have a JSON string that I want to read, convert to an object that I can manipulate, and then convert back into a JSON string.
I am using Python 3.10 dataclasses, and one of the attributes of the class is another class (mySubClass). When I call json.dumps(myClass), I get the following error: TypeError: Object of type mySubClass is not JSON serializable.
Is there a way I can instantiate the dataclass myClass with everything it needs (including mySubClass), and then have a "post init operation" that will convert myClass.mySubClass into a simple JSON str? Or am I going about this the wrong way?
My original goal was to have the following:
import json
from dataclasses import dataclass
from types import SimpleNamespace

@dataclass
class mySubClass:
    sub_item1: str
    sub_item2: str

@dataclass
class myClass:
    item1: str
    item2: mySubClass

...

convert_new_jsonStr_toObj = json.loads(received_json_str, object_hook=lambda d: SimpleNamespace(**d))

...

#: Get new values/do "stuff" to the received json string
myClass_to_jsonStr = json.dumps(myClass(item1=convert_new_jsonStr_toObj.item1, item2=mySubClass(sub_item1=convert_new_jsonStr_toObj.sub_item1, sub_item2=convert_new_jsonStr_toObj.sub_item2)))
...
#: Final json will look something like:
processed_json_str = '''{
    "item1": "new_item1",
    "item2": {
        "sub_item1": "new_sub_item1",
        "sub_item2": "new_sub_item2"
    }
}'''
#: send processed_json_str back out...
#: Note: "processed_json_str" has the same structure as "received_json_str".

If I've understood your question correctly, you can do something like this:
import json
import dataclasses

@dataclasses.dataclass
class mySubClass:
    sub_item1: str
    sub_item2: str

@dataclasses.dataclass
class myClass:
    item1: str
    item2: mySubClass

    # We need a __post_init__ method here because otherwise
    # item2 will contain a python dictionary, rather than
    # an instance of mySubClass.
    def __post_init__(self):
        self.item2 = mySubClass(**self.item2)

sampleData = '''
{
    "item1": "This is a test",
    "item2": {
        "sub_item1": "foo",
        "sub_item2": "bar"
    }
}
'''

myvar = myClass(**json.loads(sampleData))
myvar.item2.sub_item1 = 'modified'
print(json.dumps(dataclasses.asdict(myvar)))
Running this produces:
{"item1": "This is a test", "item2": {"sub_item1": "modified", "sub_item2": "bar"}}
As a side note, this all becomes easier if you use a more fully featured package like pydantic:
import json
from pydantic import BaseModel

class mySubClass(BaseModel):
    sub_item1: str
    sub_item2: str

class myClass(BaseModel):
    item1: str
    item2: mySubClass

sampleData = '''
{
    "item1": "This is a test",
    "item2": {
        "sub_item1": "foo",
        "sub_item2": "bar"
    }
}
'''

myvar = myClass(**json.loads(sampleData))
myvar.item2.sub_item1 = 'modified'
print(myvar.json())
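Note that .json() is the pydantic v1 spelling; v2 renamed the de/serialization methods. A minimal sketch of the same flow, assuming pydantic >= 2:

# pydantic v2 equivalents of the calls above:
myvar = myClass.model_validate_json(sampleData)
myvar.item2.sub_item1 = 'modified'
print(myvar.model_dump_json())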

Without using any libraries other than the builtins:
import dataclasses
import json

@dataclasses.dataclass
class mySubClass:
    sub_item1: str
    sub_item2: str

@dataclasses.dataclass
class myClass:
    item1: str
    item2: mySubClass

    @classmethod
    def from_json(cls, string: str):
        data: dict = json.loads(string)
        if isinstance(data['item2'], dict):
            data['item2'] = mySubClass(**data['item2'])
        return cls(**data)

    def json(self):
        return json.dumps(self, default=lambda o: o.__dict__)

sampleData = '''
{
    "item1": "This is a test",
    "item2": {
        "sub_item1": "foo",
        "sub_item2": "bar"
    }
}
'''

myvar = myClass.from_json(sampleData)
myvar.item2.sub_item1 = 'modified'
print(myvar.json())
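A brief note on the design choice: default=lambda o: o.__dict__ works here because both dataclasses keep their fields in __dict__, but it will serialize any unknown object the same way. If you prefer something stricter, dataclasses.asdict is a drop-in alternative (a sketch, reusing the instance above):

# asdict() accepts only dataclass instances and recurses into nested ones,
# failing loudly on unexpected types instead of dumping their __dict__:
print(json.dumps(dataclasses.asdict(myvar)))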
Which becomes a bit easier, using a ser/de library like dataclass-wizard, or dataclasses-json:
import dataclasses
from dataclass_wizard import JSONWizard

@dataclasses.dataclass
class myClass(JSONWizard):
    item1: str
    item2: 'mySubClass'

    # optional
    @property
    def json(self, indent=None):
        return self.to_json(indent=indent)

@dataclasses.dataclass
class mySubClass:
    sub_item1: str
    sub_item2: str

sampleData = '''
{
    "item1": "This is a test",
    "item2": {
        "sub_item1": "foo",
        "sub_item2": "bar"
    }
}
'''

c = myClass.from_json(sampleData)
print(c.json)
Disclaimer: I am the creator and maintainer of this library.

Flatten nested Pydantic model

from typing import Union
from pydantic import BaseModel, Field

class Category(BaseModel):
    name: str = Field(alias="name")

class OrderItems(BaseModel):
    name: str = Field(alias="name")
    category: Category = Field(alias="category")
    unit: Union[str, None] = Field(alias="unit")
    quantity: int = Field(alias="quantity")
When instantiated like this:
OrderItems(**{'name': 'Test','category':{'name': 'Test Cat'}, 'unit': 'kg', 'quantity': 10})
It returns data like this:
OrderItems(name='Test', category=Category(name='Test Cat'), unit='kg', quantity=10)
But I want the output like this:
OrderItems(name='Test', category='Test Cat', unit='kg', quantity=10)
How can I achieve this?
You should try as much as possible to define your schema the way you actually want the data to look in the end, not the way you might receive it from somewhere else.
UPDATE: Generalized solution (one nested field or more)
To generalize this problem, let's assume you have the following models:
from pydantic import BaseModel

class Foo(BaseModel):
    x: bool
    y: str
    z: int

class _BarBase(BaseModel):
    a: str
    b: float

    class Config:
        orm_mode = True

class BarNested(_BarBase):
    foo: Foo

class BarFlat(_BarBase):
    foo_x: bool
    foo_y: str
Problem: You want to be able to initialize BarFlat with a foo argument just like BarNested, but the data to end up in the flat schema, wherein the fields foo_x and foo_y correspond to x and y on the Foo model (and you are not interested in z).
Solution: Define a custom root_validator with pre=True that checks if a foo key/attribute is present in the data. If it is, it validates the corresponding object against the Foo model, grabs its x and y values and then uses them to extend the given data with foo_x and foo_y keys:
from pydantic import BaseModel, root_validator
from pydantic.utils import GetterDict

...

class BarFlat(_BarBase):
    foo_x: bool
    foo_y: str

    @root_validator(pre=True)
    def flatten_foo(cls, values: GetterDict) -> GetterDict | dict[str, object]:
        foo = values.get("foo")
        if foo is None:
            return values
        # Assume `foo` must be valid `Foo` data:
        foo = Foo.validate(foo)
        return {
            "foo_x": foo.x,
            "foo_y": foo.y,
        } | dict(values)
Note that we need to be a bit more careful inside a root validator with pre=True because the values are always passed in the form of a GetterDict, which is an immutable mapping-like object. So we cannot simply assign new values foo_x/foo_y to it like we would to a dictionary. But nothing is stopping us from returning the cleaned up data in the form of a regular old dict.
To demonstrate, we can throw some test data at it:
test_dict = {"a": "spam", "b": 3.14, "foo": {"x": True, "y": ".", "z": 0}}
test_orm = BarNested(a="eggs", b=-1, foo=Foo(x=False, y="..", z=1))
test_flat = '{"a": "beans", "b": 0, "foo_x": true, "foo_y": ""}'

bar1 = BarFlat.parse_obj(test_dict)
bar2 = BarFlat.from_orm(test_orm)
bar3 = BarFlat.parse_raw(test_flat)

print(bar1.json(indent=4))
print(bar2.json(indent=4))
print(bar3.json(indent=4))
The output:
{
    "a": "spam",
    "b": 3.14,
    "foo_x": true,
    "foo_y": "."
}
{
    "a": "eggs",
    "b": -1.0,
    "foo_x": false,
    "foo_y": ".."
}
{
    "a": "beans",
    "b": 0.0,
    "foo_x": true,
    "foo_y": ""
}
The first example simulates a common situation, where the data is passed to us in the form of a nested dictionary. The second example is the typical database ORM object situation, where BarNested represents the schema we find in a database. The third is just to show that we can still correctly initialize BarFlat without a foo argument.
One caveat to note is that the validator does not get rid of the foo key, if it finds it in the values. If your model is configured with Extra.forbid that will lead to an error. In that case, you'll just need to have an extra line, where you coerce the original GetterDict to a dict first, then pop the "foo" key instead of getting it.
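For that Extra.forbid scenario, here is a minimal sketch of the adjusted validator (same models and imports as above, assuming pydantic v1), which coerces the GetterDict to a dict and pops the key:

    @root_validator(pre=True)
    def flatten_foo(cls, values: GetterDict) -> dict[str, object]:
        data = dict(values)          # coerce the immutable GetterDict to a plain dict
        foo = data.pop("foo", None)  # drop the original key so Extra.forbid stays happy
        if foo is None:
            return data
        foo = Foo.validate(foo)
        return {"foo_x": foo.x, "foo_y": foo.y, **data}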
Original post (flatten single field)
If you need the nested Category model for database insertion, but you want a "flat" order model with category being just a string in the response, you should split that up into two separate models.
Then in the response model you can define a custom validator with pre=True to handle the case when you attempt to initialize it providing an instance of Category or a dict for category.
Here is what I suggest:
from pydantic import BaseModel, validator

class Category(BaseModel):
    name: str

class OrderItemBase(BaseModel):
    name: str
    unit: str | None
    quantity: int

class OrderItemCreate(OrderItemBase):
    category: Category

class OrderItemResponse(OrderItemBase):
    category: str

    @validator("category", pre=True)
    def handle_category_model(cls, v: object) -> object:
        if isinstance(v, Category):
            return v.name
        if isinstance(v, dict) and "name" in v:
            return v["name"]
        return v
Here is a demo:
if __name__ == "__main__":
    insert_data = '{"name": "foo", "category": {"name": "bar"}, "quantity": 1}'
    insert_obj = OrderItemCreate.parse_raw(insert_data)
    print(insert_obj.json(indent=2))
    ...  # insert into DB
    response_obj = OrderItemResponse.parse_obj(insert_obj.dict())
    print(response_obj.json(indent=2))
Here is the output:
{
  "name": "foo",
  "unit": null,
  "quantity": 1,
  "category": {
    "name": "bar"
  }
}
{
  "name": "foo",
  "unit": null,
  "quantity": 1,
  "category": "bar"
}
One of the benefits of this approach is that the JSON schema stays consistent with what you have on the model. If you use this in FastAPI, that means the swagger documentation will actually reflect what the consumer of that endpoint receives. You could of course override and customize schema creation, but... why? Just define the model correctly in the first place and avoid headaches in the future.
Try this when instantiating:
myCategory = Category(name="test cat")

OrderItems(
    name="test",
    category=myCategory.name,
    unit="kg",
    quantity=10,
)
Well, I was curious, so here's the insane way:
from typing import Union
from pydantic import BaseModel, Field

class Category(BaseModel):
    name: str = Field(alias="name")

class OrderItems(BaseModel):
    name: str = Field(alias="name")
    category: Category = Field(alias="category")
    unit: Union[str, None] = Field(alias="unit")
    quantity: int = Field(alias="quantity")

    def json(self, *args, **kwargs) -> str:
        self.__dict__.update({'category': self.__dict__['category'].name})
        return super().json(*args, **kwargs)

c = Category(name='Dranks')
m = OrderItems(name='sodie', category=c, unit='can', quantity=1)
m.json()
And you get:
'{"name": "sodie", "category": "Dranks", "unit": "can", "quantity": 1}'
The sane way would probably be:
from typing import Union
from pydantic import BaseModel, Field

class Category(BaseModel):
    name: str = Field(alias="name")

class OrderItems(BaseModel):
    name: str = Field(alias="name")
    category: Category = Field(alias="category")
    unit: Union[str, None] = Field(alias="unit")
    quantity: int = Field(alias="quantity")

c = Category(name='Dranks')
m = OrderItems(name='sodie', category=c, unit='can', quantity=1)
r = m.dict()
r['category'] = r['category']['name']
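The flattened dict can then be dumped as usual; reusing the names above, it should look like this:

import json

print(json.dumps(r))
# {"name": "sodie", "category": "Dranks", "unit": "can", "quantity": 1}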

Convert a python dict to correct python BaseModel pydantic class

My requirement is to convert a Python dictionary, which can take multiple forms, into the appropriate pydantic BaseModel instance. Here are the details:
class ConditionType(str, Enum):
    EXPRESSION = 'EXPRESSION'
    CYCLE_DUR_TREND = 'CYCLE_DUR_TREND'

class ConditionalExpressionProps(BaseConditionalProps):
    conditional_expression: str

class CycleDurationTrendProps(BaseConditionalProps):
    direction_up: bool = True
    n: int = Field(1, ge=1, le=1000)

class ConditionalConfig(BaseModel):
    cond_type: ConditionType = ConditionType.EXPRESSION
    condition_prop: BaseConditionalProps

class SendNotificationChannel(BaseModel):
    id: str
    customer_id: str
    conditional_config: Optional[ConditionalConfig]
I want to create a SendNotificationChannel instance with correct conditional_config set such that condition_prop becomes ConditionalExpressionProps or CycleDurationTrendProps based on the structure of dict.
For example, if dict is:
{
    "id": "1",
    "customer_id": "abc",
    "conditional_config": {
        "cond_type": "EXPRESSION",
        "conditional_expression": ".s_num>10"
    }
}
then doing something like SendNotificationChannel(**dict) should make conditional_config.condition_prop of type ConditionalExpressionProps, and similarly for CycleDurationTrendProps.
However, when I do this, it's using BaseConditionalProps:
>>> channel = {"conditional_config" : {"cond_type": "EXPRESSION", "condition_prop":{"conditional_expression":"ALPHA"}}}
>>> channel_obj = SendNotificationChannel(**channel)
>>> channel_obj
SendNotificationChannel(conditional_config=ConditionalConfig(cond_type=<ConditionType.EXPRESSION: 'EXPRESSION'>, condition_prop=BaseConditionalProps()))
>>> channel_obj.conditional_config.condition_prop
BaseConditionalProps()
>>>
I'd like to know how someone would solve this problem, or whether I'm tackling it the wrong way.
If I understand your question correctly, you can use the .parse_obj method for your problem. You also need to update the condition_prop field type to a Union of the concrete models. The following code is an example:
from enum import Enum
from typing import Optional, Union
from pydantic import BaseModel, Field

class ConditionType(str, Enum):
    EXPRESSION = 'EXPRESSION'
    CYCLE_DUR_TREND = 'CYCLE_DUR_TREND'

class BaseConditionalProps(BaseModel):
    pass

class ConditionalExpressionProps(BaseConditionalProps):
    conditional_expression: str

class CycleDurationTrendProps(BaseConditionalProps):
    direction_up: bool = True
    n: int = Field(1, ge=1, le=1000)

class ConditionalConfig(BaseModel):
    cond_type: ConditionType = ConditionType.EXPRESSION
    condition_prop: Union[ConditionalExpressionProps, CycleDurationTrendProps]

class SendNotificationChannel(BaseModel):
    id: str
    customer_id: str
    conditional_config: Optional[ConditionalConfig]

channel_1 = {
    "id": "1",
    "customer_id": "abc",
    "conditional_config": {
        "cond_type": "EXPRESSION",
        "condition_prop": {
            "conditional_expression": ".s_num>10"
        }
    }
}
channel_1_obj = SendNotificationChannel.parse_obj(channel_1)
print(channel_1_obj)
# id='1' customer_id='abc' conditional_config=ConditionalConfig(cond_type=<ConditionType.EXPRESSION: 'EXPRESSION'>, condition_prop=ConditionalExpressionProps(conditional_expression='.s_num>10'))

channel_2 = {
    "id": "2",
    "customer_id": "def",
    "conditional_config": {
        "cond_type": "CYCLE_DUR_TREND",
        "condition_prop": {
            "n": 10
        }
    }
}
channel_2_obj = SendNotificationChannel.parse_obj(channel_2)
print(channel_2_obj)
# id='2' customer_id='def' conditional_config=ConditionalConfig(cond_type=<ConditionType.CYCLE_DUR_TREND: 'CYCLE_DUR_TREND'>, condition_prop=CycleDurationTrendProps(direction_up=True, n=10))
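One caveat with a plain Union in pydantic v1: members are tried left to right, and since every field on CycleDurationTrendProps has a default, ambiguous condition_prop data can silently match it. If you want the choice driven by cond_type explicitly, here is a hedged sketch (assuming pydantic v1, reusing the models above) using a pre root validator:

from pydantic import root_validator

class ConditionalConfig(BaseModel):
    cond_type: ConditionType = ConditionType.EXPRESSION
    condition_prop: Union[ConditionalExpressionProps, CycleDurationTrendProps]

    @root_validator(pre=True)
    def pick_prop_model(cls, values: dict) -> dict:
        # Map each tag to its concrete model so the Union never has to guess.
        model_by_type = {
            'EXPRESSION': ConditionalExpressionProps,
            'CYCLE_DUR_TREND': CycleDurationTrendProps,
        }
        prop = values.get('condition_prop')
        cond_type = values.get('cond_type', 'EXPRESSION')
        if isinstance(prop, dict) and cond_type in model_by_type:
            values = dict(values)
            values['condition_prop'] = model_by_type[cond_type].parse_obj(prop)
        return values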

Can we use dataclasses as DTO in AWS python Lambda for nested JSON

I was looking for a DTO-like structure to store data from JSON in an AWS Python Lambda.
I came across dataclasses in Python and tried using them to create simple DTOs for the JSON data.
My project contains a lot of lambdas and heavy JSON parsing, and I had been using dicts until now to handle the parsed JSON.
Please advise whether dataclasses, as a standalone module from the standard library, are the right way to get DTO-like functionality in AWS Python Lambdas.
I have included these DTOs in a Lambda layer for reusability across the other lambdas.
I am worried that maintaining these dataclasses will be difficult for nested data.
Adding a code snippet for reference:
lambda_handler.py

import json
from dataclasses import asdict
from custom_functions.epicervix_dto import EpicervixDto
from custom_functions.dto_class import Dejlog

def lambda_handler(event, context):
    a = Dejlog(**event)
    return {
        'statusCode': 200,
        'body': json.dumps(asdict(a))
    }
dto_class.py from the lambda layer:

from dataclasses import dataclass, field
from typing import Any

@dataclass
class Dejlog:
    PK: str
    SK: str
    eventtype: str
    result: Any
    type: str = field(init=False, repr=False, default=None)
    status: str
    event: str = field(init=False, repr=False, default=None)
One option is to use the dataclass-wizard library for this task, which supports automatic key casing transforms, as well as de/serializing a nested dataclass model.
Here's a (mostly) complete example of how that could work:
from dataclasses import dataclass, field
from typing import Any

from dataclass_wizard import JSONWizard, json_field

@dataclass
class Dejlog(JSONWizard):
    class _(JSONWizard.Meta):
        """Change key transform for dump (serialization) process; default transform is to `camelCase`."""
        key_transform_with_dump = 'SNAKE'

    PK: str
    SK: str
    result: Any
    type: str = field(init=False, repr=False, default=None)
    status: str
    event: 'Event'

    def response(self) -> dict:
        return {
            'statusCode': 200,
            'body': self.to_json()
        }

@dataclass
class Event:
    event_type: str
    # pass `dump=False` to exclude field from the dump (serialization) process
    event: str = json_field('', init=False, dump=False, repr=False, default=None)

my_data: dict = {
    'pk': '123',
    'SK': 'test',
    'result': {'key': 'value'},
    'status': '200',
    'event': {'eventType': 'something here'}
}

instance = Dejlog.from_dict(my_data)
print(f'{instance!r}')
print(instance.response())
Result:
Dejlog(PK='123', SK='test', result={'key': 'value'}, status='200', event=Event(event_type='something here'))
{'statusCode': 200, 'body': '{"pk": "123", "sk": "test", "result": {"key": "value"}, "type": null, "status": "200", "event": {"event_type": "something here"}}'}

deserialize json with objects

In Python 3, how can I deserialize an object structure from JSON?
Example json:
{ 'name': 'foo',
  'some_object': { 'field1': 'bar', 'field2': '0' },
  'some_list_of_objects': [
    { 'field1': 'bar1', 'field2': '1' },
    { 'field1': 'bar2', 'field2': '2' },
    { 'field1': 'bar3', 'field2': '3' }
  ]
}
Here's my python code:
import json

class A:
    name: str
    some_object: B
    some_list_of_objects: list(C)

    def __init__(self, file_name):
        with open(file_name, "r") as json_file:
            self.__dict__ = json.load(json_file)

class B:
    field1: int
    field2: str

class C:
    field1: int
    field2: str
How to force some_object to be of type B and some_list_of_objects to be of type list of C?
As you're using Python 3, I would suggest using dataclasses to model your classes. This should improve your overall code quality and also eliminate the need to explicitly declare an __init__ constructor method for your class, for example.
If you're on board with using a third-party library, I'd suggest looking into an efficient JSON serialization library like the dataclass-wizard that performs implicit type conversion - for example, string to annotated int as below. Note that I'm using StringIO here, which is a file-like object containing a JSON string to de-serialize into a nested class model.
Note: the following approach should work in Python 3.7+.
from __future__ import annotations

from dataclasses import dataclass
from io import StringIO

from dataclass_wizard import JSONWizard

json_data = StringIO("""
{ "name": "foo",
  "some_object": { "field1": "bar", "field2": "0" },
  "some_list_of_objects": [
    { "field1": "bar1", "field2": "1" },
    { "field1": "bar2", "field2": "2" },
    { "field1": "bar3", "field2": "3" }
  ]
}
""")

@dataclass
class A(JSONWizard):
    name: str
    some_object: B
    some_list_of_objects: list[C]

@dataclass
class B:
    field1: str
    field2: int

@dataclass
class C:
    field1: str
    field2: int

a = A.from_json(json_data.read())
print(f'{a!r}')  # alternatively: print(repr(a))
Output
A(name='foo', some_object=B(field1='bar', field2=0), some_list_of_objects=[C(field1='bar1', field2=1), C(field1='bar2', field2=2), C(field1='bar3', field2=3)])
Loading from a JSON file
As per the suggestions in this post, I would discourage overriding the constructor method to pass the name of a JSON file to load the data from. Instead, I would suggest creating a helper class method as below, that can be invoked like A.from_json_file('file.json') if desired.
    @classmethod
    def from_json_file(cls, file_name: str):
        """Deserialize json file contents into an A object."""
        # Note: this assumes `import json` at module level.
        with open(file_name, 'r') as json_file:
            return cls.from_dict(json.load(json_file))
Suggestions
Note that variable annotations (or annotations in general) are subscripted using square brackets [] rather than parentheses as appears in the original version above.
some_list_of_objects: list(C)
In the above solution, I've instead changed that to:
some_list_of_objects: list[C]
This works because support for subscripting standard collections (e.g. list[C]) was introduced in PEP 585 for Python 3.9. However, the from __future__ import annotations statement, available since Python 3.7, effectively converts all annotations to forward-declared string values, so these new-style annotations can be used on Python 3.7+ as well; see the sketch below.
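For illustration, this is roughly what the older spelling looks like when the __future__ import is not available (a sketch, reusing the classes and imports above, with typing.List in place of the built-in generic):

# On Python 3.7/3.8 without `from __future__ import annotations`,
# the built-in `list` is not subscriptable in annotations; use typing.List:
from typing import List

@dataclass
class A(JSONWizard):
    name: str
    some_object: 'B'
    some_list_of_objects: List['C']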
One other change I made, was in regards to swapping out the order of declared class annotations. For example, note the below:
class B:
    field1: int
    field2: str
However, note the corresponding field in the JSON data, that would be deserialized to a B object:
'some_object': { 'field1': 'bar', 'field2' : '0' },
In the above implementation, I've swapped out the field annotations in such cases, so class B for instance is declared as:
class B:
    field1: str
    field2: int

Initialize Python dataclass from dictionary

Let's say I want to initialize the below dataclass
from dataclasses import dataclass

@dataclass
class Req:
    id: int
    description: str
I can of course do it in the following way:
data = make_request() # gives me a dict with id and description as well as some other keys.
# {"id": 123, "description": "hello", "data_a": "", ...}
req = Req(data["id"], data["description"])
But, is it possible for me to do it with dictionary unpacking, given that the keys I need are always a subset of the dictionary's keys?
req = Req(**data) # TypeError: __init__() got an unexpected keyword argument 'data_a'
Here's a solution that can be used generically for any class. It simply filters the input dictionary to exclude keys that aren't field names of the class with init==True:
from dataclasses import dataclass, fields

@dataclass
class Req:
    id: int
    description: str

def classFromArgs(className, argDict):
    fieldSet = {f.name for f in fields(className) if f.init}
    filteredArgDict = {k: v for k, v in argDict.items() if k in fieldSet}
    return className(**filteredArgDict)

data = {"id": 123, "description": "hello", "data_a": ""}
req = classFromArgs(Req, data)
print(req)
Output:
Req(id=123, description='hello')
UPDATE: Here's a variation on the strategy above which creates a utility class that caches dataclasses.fields for each dataclass that uses it (prompted by a comment by @rv.kvetch expressing performance concerns around duplicate processing of dataclasses.fields by multiple invocations for the same dataclass).
from dataclasses import dataclass, fields

class DataClassUnpack:
    classFieldCache = {}

    @classmethod
    def instantiate(cls, classToInstantiate, argDict):
        if classToInstantiate not in cls.classFieldCache:
            cls.classFieldCache[classToInstantiate] = {f.name for f in fields(classToInstantiate) if f.init}
        fieldSet = cls.classFieldCache[classToInstantiate]
        filteredArgDict = {k: v for k, v in argDict.items() if k in fieldSet}
        return classToInstantiate(**filteredArgDict)

@dataclass
class Req:
    id: int
    description: str

req = DataClassUnpack.instantiate(Req, {"id": 123, "description": "hello", "data_a": ""})
print(req)

req = DataClassUnpack.instantiate(Req, {"id": 456, "description": "goodbye", "data_a": "my", "data_b": "friend"})
print(req)

@dataclass
class Req2:
    id: int
    description: str
    data_a: str

req2 = DataClassUnpack.instantiate(Req2, {"id": 123, "description": "hello", "data_a": "world"})
print(req2)

print("\nHere's a peek at the internals of DataClassUnpack:")
print(DataClassUnpack.classFieldCache)
Output:
Req(id=123, description='hello')
Req(id=456, description='goodbye')
Req2(id=123, description='hello', data_a='world')
Here's a peek at the internals of DataClassUnpack:
{<class '__main__.Req'>: {'description', 'id'}, <class '__main__.Req2'>: {'description', 'data_a', 'id'}}
You can possibly introduce a new function that will perform the given conversion from dict to dataclass:
import inspect
from dataclasses import dataclass

@dataclass
class Req:
    id: int
    description: str

def from_dict_to_dataclass(cls, data):
    return cls(
        **{
            key: (data[key] if val.default == val.empty else data.get(key, val.default))
            for key, val in inspect.signature(cls).parameters.items()
        }
    )

from_dict_to_dataclass(Req, {"id": 123, "description": "hello", "data_a": ""})
# Output: Req(id=123, description='hello')
Note, the if val.default == val.empty condition is needed in order to check whether the dataclass field has a default value set (val here is an inspect.Parameter). When it does, we fall back to that default for keys missing from the input dict when constructing the dataclass.
Another workaround is to intercept the __init__ of the dataclass and filter out the fields that are not recognized.
from dataclasses import dataclass, fields

@dataclass
class Req1:
    id: int
    description: str

@dataclass
class Req2:
    id: int
    description: str

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            if key in REQ2_FIELD_NAMES:
                setattr(self, key, value)

# To not re-evaluate the field names for each and every creation of Req2, list them here.
REQ2_FIELD_NAMES = {field.name for field in fields(Req2)}

data = {
    "id": 1,
    "description": "some",
    "data_a": None,
}

try:
    print("Call for Req1:", Req1(**data))
except Exception as error:
    print("Call for Req1:", error)

try:
    print("Call for Req2:", Req2(**data))
except Exception as error:
    print("Call for Req2:", error)
Output:
Call for Req1: __init__() got an unexpected keyword argument 'data_a'
Call for Req2: Req2(id=1, description='some')
Related question:
How does one ignore extra arguments passed to a data class?
