AWS CDK add port mappings - python

I've been trying a lot of different things, but I can't seem to get this to work.
I'm trying to declare ports for my container in AWS CDK within the ecs.TaskDefinition construct.
I keep getting an error that an array type was expected, even though I'm using the specified ecs.PortMapping construct that the port_mappings parameter requires.
File "/home/user/.local/share/virtualenvs/AWS_Automation--K5ZV1iW/lib/python3.9/site-packages/aws_cdk/aws_ecs/__init__.py", line 27675, in add_container
return typing.cast(ContainerDefinition, jsii.invoke(self, "addContainer", [id, props]))
File "/home/user/.local/share/virtualenvs/AWS_Automation--K5ZV1iW/lib/python3.9/site-packages/jsii/_kernel/__init__.py", line 143, in wrapped
return _recursize_dereference(kernel, fn(kernel, *args, **kwargs))
File "/home/user/.local/share/virtualenvs/AWS_Automation--K5ZV1iW/lib/python3.9/site-packages/jsii/_kernel/__init__.py", line 355, in invoke
response = self.provider.invoke(
File "/home/user/.local/share/virtualenvs/AWS_Automation--K5ZV1iW/lib/python3.9/site-packages/jsii/_kernel/providers/process.py", line 359, in invoke
return self._process.send(request, InvokeResponse)
File "/home/user/.local/share/virtualenvs/AWS_Automation--K5ZV1iW/lib/python3.9/site-packages/jsii/_kernel/providers/process.py", line 326, in send
raise JSIIError(resp.error) from JavaScriptError(resp.stack)
jsii.errors.JSIIError: Expected array type, got {"$jsii.struct":{"fqn":"aws-cdk-lib.aws_ecs.PortMapping","data":{"containerPort":8501,"hostPort":null,"protocol":null}}}
Any help would be appreciated. My relevant code is below.
from aws_cdk import (aws_ec2 as ec2,
                     aws_ecs as ecs,
                     aws_ecs_patterns as ecs_patterns,
                     aws_ecr as ecr,
                     aws_route53 as route53,
                     aws_certificatemanager as certificatemanager,
                     aws_elasticloadbalancingv2 as elbv2)
container_port_mappings = ecs.PortMapping(container_port=8501)

task_def = ecs.TaskDefinition(self,
                              'TD',
                              compatibility=ecs.Compatibility.FARGATE,
                              cpu='512',
                              memory_mib='1024')

task_def.add_container("SL_container",
                       image=ecs.ContainerImage.from_ecr_repository(_repo),
                       port_mappings=container_port_mappings)

port_mappings accepts a list of PortMapping objects:
container_port_mappings = [ecs.PortMapping(container_port=8501)]
BTW, CDK supports Python type annotations, which help catch these kinds of errors early.
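For reference, the corrected call would then look something like this (a sketch based on the code above):

container_port_mappings = [ecs.PortMapping(container_port=8501)]

task_def.add_container("SL_container",
                       image=ecs.ContainerImage.from_ecr_repository(_repo),
                       port_mappings=container_port_mappings)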


Dagster cannot connect to mongodb locally

I was going through the Dagster tutorials and thought it would be a good exercise to connect to my local MongoDB.
from dagster import get_dagster_logger, job, op
from pymongo import MongoClient

@op
def connection():
    client = MongoClient("mongodb://localhost:27017/")
    return client["development"]

@job
def execute():
    client = connection()
    get_dagster_logger().info(f"Connection: {client} ")
Dagster error:
dagster.core.errors.DagsterExecutionHandleOutputError: Error occurred while handling output "result" of step "connection":
File "/usr/local/lib/python3.9/site-packages/dagster/core/execution/plan/execute_plan.py", line 232, in dagster_event_sequence_for_step
for step_event in check.generator(step_events):
File "/usr/local/lib/python3.9/site-packages/dagster/core/execution/plan/execute_step.py", line 348, in core_dagster_event_sequence_for_step
for evt in _type_check_and_store_output(step_context, user_event, input_lineage):
File "/usr/local/lib/python3.9/site-packages/dagster/core/execution/plan/execute_step.py", line 405, in _type_check_and_store_output
for evt in _store_output(step_context, step_output_handle, output, input_lineage):
File "/usr/local/lib/python3.9/site-packages/dagster/core/execution/plan/execute_step.py", line 534, in _store_output
for elt in iterate_with_context(
File "/usr/local/lib/python3.9/site-packages/dagster/utils/__init__.py", line 400, in iterate_with_context
return
File "/usr/local/Cellar/python#3.9/3.9.12/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py", line 137, in __exit__
self.gen.throw(typ, value, traceback)
File "/usr/local/lib/python3.9/site-packages/dagster/core/execution/plan/utils.py", line 73, in solid_execution_error_boundary
raise error_cls(
The above exception was caused by the following exception:
TypeError: cannot pickle '_thread.lock' object
File "/usr/local/lib/python3.9/site-packages/dagster/core/execution/plan/utils.py", line 47, in solid_execution_error_boundary
yield
File "/usr/local/lib/python3.9/site-packages/dagster/utils/__init__.py", line 398, in iterate_with_context
next_output = next(iterator)
File "/usr/local/lib/python3.9/site-packages/dagster/core/execution/plan/execute_step.py", line 524, in _gen_fn
gen_output = output_manager.handle_output(output_context, output.value)
File "/usr/local/lib/python3.9/site-packages/dagster/core/storage/fs_io_manager.py", line 124, in handle_output
pickle.dump(obj, write_obj, PICKLE_PROTOCOL)
I have tested this locally in an IPython session and it works, so the issue is related to Dagster.
The default IOManager requires that inputs and outputs of ops be pickleable - it's likely that your MongoClient is not. You might want to try refactoring this to use Dagster's @resource method. This allows you to define resources externally to your @op, and makes mocking those resources later in tests really easy. Your code would look something like this:
from dagster import get_dagster_logger, job, op, resource
from pymongo import MongoClient

@resource
def mongo_client():
    client = MongoClient("mongodb://localhost:27017/")
    return client["development"]

@op(
    required_resource_keys={'mongo_client'}
)
def test_client(context):
    client = context.resources.mongo_client
    get_dagster_logger().info(f"Connection: {client} ")

@job(
    resource_defs={'mongo_client': mongo_client}
)
def execute():
    test_client()
Notice too that I moved the testing code into another @op, and then only called that op from within the execute @job. This is because the code within a job definition gets compiled at load time, and is only used to describe the graph of ops to execute. All general programming to carry out tasks needs to be contained within @op code.
The really neat thing about the @resource pattern is that it makes testing with mock resources, or more generally swapping resources, incredibly easy. Let's say you wanted a mocked client so you could run your job code without actually hitting the database. You could do something like the following:
from dagster import graph

@resource
def mocked_mongo_client():
    from unittest.mock import MagicMock
    return MagicMock()

@graph
def execute_graph():
    test_client()

execute_live = execute_graph.to_job(name='execute_live',
                                    resource_defs={'mongo_client': mongo_client})

execute_mocked = execute_graph.to_job(name='execute_mocked',
                                      resource_defs={'mongo_client': mocked_mongo_client})
This uses Dagster's @graph pattern to describe a DAG of ops, then uses the .to_job() method on the GraphDefinition object to configure the graph in different ways. This way you can have the same exact underlying op structure, but pass different resources, tags, executors, etc.
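As a quick sanity check, either job can then be run in-process (a sketch, assuming a Dagster version where JobDefinition exposes execute_in_process):

# Runs the mocked job without touching a real MongoDB instance.
result = execute_mocked.execute_in_process()
assert result.success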

PyRal getAttachment

I have a fairly simple use case, but I'm not understanding the error message I'm receiving.
I'm using the requests and pyral modules; pyral (http://pyral.readthedocs.io/en/latest/interface.html#) is really just a wrapper for Rally's RESTful API. My goal is to get a file (attachment) from a Rally (a CA product) UserStory and store it on a local file system.
For context, here is my environment setup (authenticate to Rally and create an object). I've obviously removed the authentication information.
import sys

from pyral import Rally, rallyWorkset

options = [arg for arg in sys.argv[1:] if arg.startswith('--')]
args = [arg for arg in sys.argv[1:] if arg not in options]
server, user, password, apikey, workspace, project = rallyWorkset(options)

rally = Rally(server='rally1.rallydev.com',
              user='**********', password='***********',
              apikey="**************",
              workspace='**************', project='**************',
              server_ping=False)
After that I get a response object for just one user story (see the query for US845); I do this just to simplify the problem.
r = rally.get('UserStory', fetch=True, projectScopeDown=True, query='FormattedID = US845')
and then I use the built-in iterator to get the user story from the RallyRESTResponse object.
us = r.next()
From there it feels like I should be able to easily use the getAttachment() method, which accepts an artifact (us) and a filename (the name of an attachment). I'm able to use getAttachmentNames(us) to return a list of attachment names. The issue arises when I try something like
attachment_names = rally.getAttachmentNames(us)  # get attachment names for this UserStory
attachment_file = rally.getAttachment(us, attachment_names[0])  # try to get the first attachment
which returns an error like this:
Traceback (most recent call last):
File "<ipython-input-81-a4a342a59c5a>", line 1, in <module>
attachment_file = rally.getAttachment(us, attachment_names[0])
File "C:\Miniconda3\lib\site-packages\pyral\restapi.py", line 1700, in getAttachment
att.Content = base64.decodebytes(att_content.Content) # maybe further txfm to Unicode ?
File "C:\Miniconda3\lib\base64.py", line 552, in decodebytes
_input_type_check(s)
File "C:\Miniconda3\lib\base64.py", line 520, in _input_type_check
raise TypeError(msg) from err
TypeError: expected bytes-like object, not str
I receive a similar error if I try to use
test_obj = rally.getAttachments(us)
which returns an error like this:
Traceback (most recent call last):
File "<ipython-input-82-06a8cd525177>", line 1, in <module>
rally.getAttachments(us)
File "C:\Miniconda3\lib\site-packages\pyral\restapi.py", line 1721, in getAttachments
attachments = [self.getAttachment(artifact, attachment_name) for attachment_name in attachment_names]
File "C:\Miniconda3\lib\site-packages\pyral\restapi.py", line 1721, in <listcomp>
attachments = [self.getAttachment(artifact, attachment_name) for attachment_name in attachment_names]
File "C:\Miniconda3\lib\site-packages\pyral\restapi.py", line 1700, in getAttachment
att.Content = base64.decodebytes(att_content.Content) # maybe further txfm to Unicode ?
File "C:\Miniconda3\lib\base64.py", line 552, in decodebytes
_input_type_check(s)
File "C:\Miniconda3\lib\base64.py", line 520, in _input_type_check
raise TypeError(msg) from err
TypeError: expected bytes-like object, not str
It seems that I'm fundamentally misunderstanding the parameters that this method requires. Has anyone been able to do this successfully before? For what it's worth, I have no issues using the addAttachment() method with a workflow similar to the above. I've tried converting the filename (a string) to UTF-8 bytes with the bytes() method, but that didn't help.
I've also looked at this example in the pyral source, but I receive exactly the same error when trying to execute it:
https://github.com/klehman-rally/pyral/blob/master/examples/get_attachments.py
It looks like the issue is in the restapi.py script - there is no decodebytes method in the Python 2 base64 library (Python 2 only has decodestring; decodebytes was added in Python 3):
att.Content = base64.decodebytes(att_content.Content)
All available methods are described in:
RFC 3548: Base16, Base32, Base64 Data Encodings
So, the workaround is to replace decodebytes with base64.b64decode in restapi.py. At least, it works for me.
E.g. the location on Mac OS X:
/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pyral/restapi.py
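Concretely, the patched line in restapi.py would look something like this (a sketch; the exact line number varies by pyral version):

# b64decode accepts an ASCII str as well as bytes, so it also avoids
# the TypeError seen above under Python 3:
att.Content = base64.b64decode(att_content.Content)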
I have used the code below to get all attachments, since getAttachments is not working as expected. It will create a file in the current directory with the same name as each attachment.
import sys
import base64
from pyral import rallyWorkset, Rally, RallyRESTResponse

rally = Rally(server, user=USER_NAME, password=PASSWORD, workspace=workspace, project=project)

criterion = 'FormattedID = US57844'
response = rally.get('HierarchicalRequirement', query=criterion, order="FormattedID",
                     pagesize=200, limit=400, projectScopeDown=True)
artifact = response.next()
context, augments = rally.contextHelper.identifyContext()

for att in artifact.Attachments:
    # Fetch the raw AttachmentContent resource by OID
    resp = rally._getResourceByOID(context, 'AttachmentContent', att.Content.oid, project=None)
    if resp.status_code not in [200, 201, 202]:
        break
    res = RallyRESTResponse(rally.session, context, "AttachmentContent.x", resp, "full", 1)
    if res.errors or res.resultCount != 1:
        print("breaking the for loop")
        break
    att_content = res.next()
    cont = att_content.Content
    # Decode the base64 payload and write it to a file named after the attachment
    x = base64.b64decode(cont)
    output = open(att.Name, 'wb')
    output.write(x)
    output.close()

Python can't import WMI under special circumstance

I've created a standalone exe Windows service written in Python and built with PyInstaller. When I try to import wmi, an exception is thrown.
What's really baffling is that I can do it without a problem when running the code in a foreground exe, a foreground Python script, or a Python script running as a background service via pythonservice.exe!
Why does it fail under this special circumstance of running as a service exe?
import wmi
Produces this error for me:
com_error: (-2147221020, 'Invalid syntax', None, None)
Here's the traceback:
Traceback (most recent call last):
File "<string>", line 43, in onRequest
File "C:\XXX\XXX\XXX.pyz", line 98, in XXX
File "C:\XXX\XXX\XXX.pyz", line 31, in XXX
File "C:\XXX\XXX\XXX.pyz", line 24, in XXX
File "C:\XXX\XXX\XXX.pyz", line 34, in XXX
File "C:\Program Files (x86)\PyInstaller-2.1\PyInstaller\loader\pyi_importers.py", line 270, in load_module
File "C:\XXX\XXX\out00-PYZ.pyz\wmi", line 157, in <module>
File "C:\XXX\XXX\out00-PYZ.pyz\win32com.client", line 72, in GetObject
File "C:\XXX\XXX\out00-PYZ.pyz\win32com.client", line 87, in Moniker
wmi.py line 157 has a global call to GetObject:
obj = GetObject ("winmgmts:")
win32com\client\__init__.py contains GetObject(), which ends up calling Moniker():
def GetObject(Pathname = None, Class = None, clsctx = None):
    """
    Mimic VB's GetObject() function.
    ob = GetObject(Class = "ProgID") or GetObject(Class = clsid) will
    connect to an already running instance of the COM object.
    ob = GetObject(r"c:\blah\blah\foo.xls") (aka the COM moniker syntax)
    will return a ready to use Python wrapping of the required COM object.
    Note: You must specifiy one or the other of these arguments. I know
    this isn't pretty, but it is what VB does. Blech. If you don't
    I'll throw ValueError at you. :)
    This will most likely throw pythoncom.com_error if anything fails.
    """
    if clsctx is None:
        clsctx = pythoncom.CLSCTX_ALL
    if (Pathname is None and Class is None) or \
       (Pathname is not None and Class is not None):
        raise ValueError("You must specify a value for Pathname or Class, but not both.")
    if Class is not None:
        return GetActiveObject(Class, clsctx)
    else:
        return Moniker(Pathname, clsctx)
The first line in Moniker(), i.e. the MkParseDisplayName() call, is where the exception is encountered:
def Moniker(Pathname, clsctx = pythoncom.CLSCTX_ALL):
    """
    Python friendly version of GetObject's moniker functionality.
    """
    moniker, i, bindCtx = pythoncom.MkParseDisplayName(Pathname)
    dispatch = moniker.BindToObject(bindCtx, None, pythoncom.IID_IDispatch)
    return __WrapDispatch(dispatch, Pathname, clsctx=clsctx)
Note: I tried using
pythoncom.CoInitialize()
which apparently solves this import problem within a thread, but that didn't work...
I also faced the same issue and finally figured it out: import pythoncom and call pythoncom.CoInitialize(), then import wmi:
import pythoncom
pythoncom.CoInitialize()
import wmi
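Since COM apartments are per-thread, each service worker thread that touches WMI needs its own initialization. A minimal sketch of that pattern (the worker function and the query are illustrative, not from the original post):

import pythoncom

def worker():
    # Every thread that uses COM must initialize it first ...
    pythoncom.CoInitialize()
    try:
        import wmi
        c = wmi.WMI()
        for os_info in c.Win32_OperatingSystem():
            print(os_info.Caption)
    finally:
        # ... and should uninitialize it on the way out.
        pythoncom.CoUninitialize()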
I tried solving this countless ways. In the end, I threw in the towel and had to find a different means of achieving the same goals I had with wmi.
Apparently that invalid syntax error is thrown when trying to create an object with an invalid "moniker name", which can simply mean the service, application, etc. doesn't exist on the system. Under this circumstance "winmgmts" just can't be found at all, it seems! And yes, I tried numerous variations on that moniker with additional specs, and I tried running the service under a different user account, etc.
Honestly, I didn't dig in to understand why this occurs.
Anyway, the below imports solved my problem - which was occurring only when ran from a Flask instance:
import os
import pythoncom
pythoncom.CoInitialize()
from win32com.client import GetObject
import wmi
The error "com_error: (-2147221020, 'Invalid syntax', None, None)" is exactly what popped up in my case so I came here after a long time of searching the web and voila:
Under this circumstance "winmgmts" just can't be found at all it
seems!
This was the correct hint for because i had just a typo , used "winmgmt:" without trailing 's'. So invalid sythax refers to the first methods parameter, not the python code itself. o_0 Unfortunately I can't find any reference which objects we can get with win32com.client.GetObject()... So if anybody has a hint to which params are "allowed" / should work, please port it here. :-)
kind regards
ChrisPHL
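For reference, the two GetObject moniker forms that come up most often for WMI look like this (a sketch; these are standard WMI monikers, not from the original posts):

from win32com.client import GetObject

# Note the trailing 's': "winmgmts:" is the WMI moniker prefix.
svc = GetObject("winmgmts:")                   # default namespace
cimv2 = GetObject(r"winmgmts:\\.\root\cimv2")  # explicit namespace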

gae mapreduce generator error no attribute validate_bucket_name

This is my first GAE project. I got my serial code to work on the dev app (I am using the GoogleAppEngineLauncher on Mac). Since my code takes too long to finish, I am trying to use mapreduce to speed up the process. I tried the following code but keep getting the error below. I am not sure if this is because of some error in my code or if I am missing any statements in the *.yaml files. Kindly help!
class ShuffleDictPipeline(base_handler.PipelineBase):
    def run(self, *args, **kwargs):
        """ run """
        mapper_params = {
            "entity_kind": "coremic.RandomDict",
            "batch_size": 500,
            "filters": [("idx", "=", ndb_custom_key)]
        }
        reducer_params = {
            "mime_type": "text/plain"
        }
        output = yield mapreduce_pipeline.MapreducePipeline(
            "calc_shuff_core_microb",
            mapper_spec="coremic.shuffle_dict_coremic_map",
            mapper_params=mapper_params,
            reducer_spec="coremic.shuffle_dict_coremic_reduce",
            reducer_params=reducer_params,
            input_reader_spec="mapreduce.input_readers.DatastoreInputReader",
            output_writer_spec="mapreduce.output_writers.BlobstoreOutputWriter",
            shards=16)
        yield StoreOutput(output)
Error:
ERROR 2016-03-05 20:03:21,706 pipeline.py:2432]
Generator mapreduce.mapper_pipeline.MapperPipeline(*(u'calc_shuff_core_microb-map', u'coremic.shuffle_dict_coremic_map', u'mapreduce.input_readers.DatastoreInputReader'), **{'output_writer_spec': u'mapreduce.output_writers._GoogleCloudStorageKeyValueOutputWriter', 'params': {u'batch_size': 500, u'bucket_name': u'app_default_bucket', u'entity_kind': u'coremic.RandomDict',... (324 bytes))#b96dd511c0454fd99413d267b7388857 raised exception. AttributeError: 'NoneType' object has no attribute 'validate_bucket_name'
Traceback (most recent call last):
File "/Users/rr/GAE/coremic/pipeline/pipeline.py", line 2156, in evaluate
self, pipeline_key, root_pipeline_key, caller_output)
File "/Users/rr/GAE/coremic/pipeline/pipeline.py", line 1110, in _run_internal
return self.run(*self.args, **self.kwargs)
File "/Users/rr/GAE/coremic/mapreduce/mapper_pipeline.py", line 102, in run
queue_name=self.queue_name,
File "/Users/rr/GAE/coremic/mapreduce/control.py", line 125, in start_map
in_xg_transaction=in_xg_transaction)
File "/Users/rr/GAE/coremic/mapreduce/handlers.py", line 1730, in _start_map
mapper_output_writer_class.validate(mapper_spec)
File "/Users/rr/GAE/coremic/mapreduce/output_writers.py", line 1075, in validate
return cls.WRITER_CLS.validate(mapper_spec)
File "/Users/rr/GAE/coremic/mapreduce/output_writers.py", line 723, in validate
super(_GoogleCloudStorageOutputWriter, cls).validate(mapper_spec)
File "/Users/rr/GAE/coremic/mapreduce/output_writers.py", line 604, in validate
cloudstorage.validate_bucket_name(
AttributeError: 'NoneType' object has no attribute 'validate_bucket_name'
I am still working on getting everything to work, but a couple of things helped.
1.1 Install the Google Cloud Storage client library in the SDK to access the bucket: https://cloud.google.com/appengine/docs/python/googlecloudstorageclient/
1.2 Set up (create) the bucket.
Then follow the steps from https://plus.google.com/+EmlynORegan/posts/6NPaRKxMkf3
Note how the mapper params have changed.
2 - In the mapreduce pipeline, replace
"mapreduce.output_writers.BlobstoreOutputWriter"
with
"mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter"
3 - Update the reducer params to (the bucket names are placeholders here):
{
    "mime_type": "text/plain",
    "output_writer": {
        "bucket_name": "<your bucket name>",
        "tmp_bucket_name": "<your tmp bucket name>"
    }
}
Other very useful link:
https://gist.github.com/nlathia/ab670053ed460c4ca02f/89178e132b894fe5467c09164d3827f70e4ae2f8
You can do one of two things. Either:
Create a Google Cloud Storage bucket associated with your project, because at the moment none is associated with it, hence the NoneType. Once done, you can add that to your mapper_params.
mapper_params = {
    ...
    "bucket_name": "<your google cloud storage bucket name>",
    ...
}
OR
Create a default bucket by visiting your app engine's application settings in the cloud console https://console.cloud.google.com/appengine/settings?project=
Install GoogleAppEngineCloudStorageClient in your project.
output_writers.py does the following:
try:
    # Check if the full cloudstorage package exists. The stub part is in runtime.
    cloudstorage = None
    import cloudstorage
    if hasattr(cloudstorage, "_STUB"):
        cloudstorage = None
    # "if" is needed because apphosting/ext/datastore_admin:main_test fails.
    if cloudstorage:
        from cloudstorage import cloudstorage_api
        from cloudstorage import errors as cloud_errors
except ImportError:
    pass  # CloudStorage library not available
So, when importing cloudstorage fails, the cloudstorage variable stays None, and that is what causes the exception later.
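A quick way to confirm whether the library is actually importable in your app's environment (a sketch, not from the original answer):

# If this raises ImportError, GoogleAppEngineCloudStorageClient is
# missing from the app's path and needs to be installed/vendored.
try:
    import cloudstorage
    print("cloudstorage found at:", cloudstorage.__file__)
except ImportError:
    print("cloudstorage not importable; install GoogleAppEngineCloudStorageClient")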

AttributeError for custom types with mixer

I have stumbled into a pretty interesting bug in the klen mixer library for Python:
https://github.com/klen/mixer
This bug occurs whenever you try to set up a model with a column using sqlalchemy.dialects.postgresql.INET. Trying to blend a model with this column will produce the following trace...
mixer: ERROR: Traceback (most recent call last):
File "/home/cllamach/PythonProjects/mixer/mixer/main.py", line 612, in blend
return type_mixer.blend(**values)
File "/home/cllamach/PythonProjects/mixer/mixer/main.py", line 130, in blend
for name, value in defaults.items()
File "/home/cllamach/PythonProjects/mixer/mixer/main.py", line 130, in <genexpr>
for name, value in defaults.items()
File "/home/cllamach/PythonProjects/mixer/mixer/mix_types.py", line 220, in gen_value
return type_mixer.gen_field(field)
File "/home/cllamach/PythonProjects/mixer/mixer/main.py", line 209, in gen_field
return self.gen_value(field.name, field, unique=unique)
File "/home/cllamach/PythonProjects/mixer/mixer/main.py", line 254, in gen_value
gen = self.get_generator(field, field_name, fake=fake)
File "/home/cllamach/PythonProjects/mixer/mixer/main.py", line 304, in get_generator
field.scheme, field_name, fake, kwargs=field.params)
File "/home/cllamach/PythonProjects/mixer/mixer/backend/sqlalchemy.py", line 178, in make_generator
stype, field_name=field_name, fake=fake, args=args, kwargs=kwargs)
File "/home/cllamach/PythonProjects/mixer/mixer/main.py", line 324, in make_generator
fabric = self.__factory.gen_maker(scheme, field_name, fake)
File "/home/cllamach/PythonProjects/mixer/mixer/factory.py", line 157, in gen_maker
if not func and fcls.__bases__:
AttributeError: Mixer (<class 'tests.test_flask.IpAddressUser'>): 'NoneType' object has no attribute '__bases__'
I debugged this error all the way down to a couple of methods in the code. The first method, get_generator, tries the following:
if key not in self.__generators:
    self.__generators[key] = self.make_generator(
        field.scheme, field_name, fake, kwargs=field.params)
And here comes the weird part. In this statement field.scheme has a value, specifically a Column object from sqlalchemy, but when it is passed down to the make_generator method it arrives as None. So far I have seen no other piece of code in between these two methods; I have debugged with ipdb and others, and have tried calling the method manually from ipdb, and still the scheme is passed as None.
I know this can be deemed too particular an issue, but I would like to know if someone has encountered this kind of issue before, as this is a first for me.
Mixer is choking on an unknown column type. It stores all the ones it knows in GenFactory.types as a dict and calls types.get(column_type), which of course will return None for an unrecognized type. I ran into this because I defined a couple of custom SQLAlchemy types with sqlalchemy.types.TypeDecorator.
To solve this problem, you'll have to monkey-patch your types into Mixer's type system. Here's how I did it:
import arrow

def _setup_mixer_with_custom_types():
    from mixer._faker import faker
    from mixer.backend.sqlalchemy import (
        GenFactory,
        mixer,
    )
    from myproject.customcolumntypes import (
        IntegerTimestamp,
        UTCDateTimeTimestamp,
    )

    def arrow_generator():
        return arrow.get(faker.date_time())

    GenFactory.generators[IntegerTimestamp] = arrow_generator
    GenFactory.generators[UTCDateTimeTimestamp] = arrow_generator
    return mixer

mixer = _setup_mixer_with_custom_types()
Note that you don't actually have to touch GenFactory.types because it's just an intermediary step that Mixer skips if it can find your type directly on GenFactory.generators.
In my case, I also had to define a custom generator (to accommodate Arrow), but you may not need to. Mixer uses the fake-factory library to generate fake data, and you can see what they're using by looking at the GenFactory.generators dict.
You have to get the column type into GenFactory.generators, which by default only contains some standard types. Instead of monkey-patching, you might subclass GenFactory and then specify your own class upon Mixer generation.
In this case, we'll customize the already subclassed GenFactory and Mixer variants from backend.sqlalchemy:
from mixer.backend.sqlalchemy import Mixer, GenFactory
from customtypes import CustomType  # The column type

def get_mixer():
    class CustomFactory(GenFactory):
        # No need to preserve entries, the parent class attribute is
        # automatically extended through GenFactory's metaclass
        generators = {
            CustomType: lambda: 42  # Or any other function
        }
    return Mixer(factory=CustomFactory)
You can use whatever function you like as a generator; it just has to return the desired value. Sometimes directly using something from faker might be enough.
In the same way, you can also customize the other attributes of GenFactory, i.e. fakers and types.
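For illustration, usage would then look something like this (SomeModel is a hypothetical mapped class with a CustomType column):

mixer = get_mixer()
# Fields of type CustomType are now filled by the lambda above (42),
# while standard column types keep their default fake generators.
instance = mixer.blend(SomeModel)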
