I'm experiencing some odd behavior with SQLAlchemy not iterating over all of the results from a query.
For example, I have the following python code:
engine = create_engine(<connection string>)
Session = sessionmaker(bind=engine)
session = Session()

columns = session.query(Column)

counter = 1
for c in columns:
    print(counter)
    counter = counter + 1

print('count: ' + str(columns.count()))
where Column is a class that I've defined and mapped in the usual SQLAlchemy way:
from base import Base
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean

class Column(Base):
    __tablename__ = 'COLUMNS'
    __table_args__ = {'schema': 'INFORMATION_SCHEMA'}

    table_catalog = Column(String)
    table_schema = Column(String)
    table_name = Column(String)
    column_name = Column(String, primary_key=True)
    data_type = Column(String)
From my query I'm expecting 7034 rows, and that is what the final print statement reports for columns.count(), but the for loop only ever counts up to 2951.
If I do anything else with the returned data inside the for loop, only those 2951 rows get processed, not all 7034.
Does anyone know why I'm seeing this discrepancy, and how I can iterate over all 7034 rows rather than just 2951?
I've figured out why I wasn't getting the results I was expecting (I did something silly).
The column_name field in the table the Column class maps to isn't unique, so declaring it as the primary key made the ORM collapse rows sharing the same column_name into a single object; since there are duplicates, fewer rows come back than expected.
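The underlying reason is that the ORM identifies objects by primary key, so rows sharing a primary key value collapse into a single identity while iterating. As a minimal hedged sketch reusing the session above, you can confirm the raw row count by selecting plain column tuples, which bypasses that deduplication:

rows = session.query(Column.table_name, Column.column_name).all()
# Plain tuples are not deduplicated by primary key, so this should
# report all 7034 rows, duplicates included.
print(len(rows))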
I fixed it by updating the definition of the Column mapping to:
from base import Base
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean

class Column(Base):
    __tablename__ = 'COLUMNS'
    __table_args__ = {'schema': 'INFORMATION_SCHEMA'}

    table_catalog = Column(String, primary_key=True)
    table_schema = Column(String, primary_key=True)
    table_name = Column(String, primary_key=True)
    column_name = Column(String, primary_key=True)
    data_type = Column(String)
I need to create a PostgreSQL Full Text Search index in Python with SQLAlchemy. Here's what I want in SQL:
CREATE TABLE person ( id INTEGER PRIMARY KEY, name TEXT );
CREATE INDEX person_idx ON person USING GIN (to_tsvector('simple', name));
Now how do I do the second part with SQLAlchemy when using the ORM:
class Person(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
You could create the index using Index in __table_args__. I also use a function to build the ts_vector expression, to keep it tidy and reusable when more than one field is required. Something like below:
from sqlalchemy import cast, func, Index
from sqlalchemy.dialects import postgresql

def create_tsvector(*args):
    exp = args[0]
    for e in args[1:]:
        exp += ' ' + e
    return func.to_tsvector('english', exp)

class Person(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)

    __ts_vector__ = create_tsvector(
        cast(func.coalesce(name, ''), postgresql.TEXT)
    )

    __table_args__ = (
        Index(
            'idx_person_fts',
            __ts_vector__,
            postgresql_using='gin'
        ),
    )
Update:
A sample query using the index (corrected based on comments):
people = Person.query.filter(Person.__ts_vector__.match(expressions, postgresql_regconfig='english')).all()
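If you want control over the generated tsquery, an alternative (a hedged sketch; 'some search words' is a placeholder) is to apply the @@ operator explicitly via op() together with plainto_tsquery:

from sqlalchemy import func

# Hedged sketch: explicit @@ match against the same expression the index covers
people = Person.query.filter(
    Person.__ts_vector__.op('@@')(func.plainto_tsquery('english', 'some search words'))
).all()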
The answer from @sharez is really useful (especially if you need to concatenate columns in your index). For anyone looking to create a tsvector GIN index on a single column, you can simplify the original answer's approach with something like:
from sqlalchemy import Column, Index, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import func

Base = declarative_base()

class Example(Base):
    __tablename__ = 'examples'

    id = Column(Integer, primary_key=True)
    textsearch = Column(String)

    __table_args__ = (
        Index(
            'ix_examples_tsv',
            func.to_tsvector('english', textsearch),
            postgresql_using='gin'
        ),
    )
Note that the comma following Index(...) in __table_args__ is not a style choice: the value of __table_args__ must be a tuple, dictionary, or None.
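In other words, a schematic illustration of the two forms:

__table_args__ = (Index('ix_examples_tsv', 'textsearch'),)  # one-element tuple: accepted
__table_args__ = (Index('ix_examples_tsv', 'textsearch'))   # parentheses only, not a tuple: rejected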
If you do need to create a tsvector GIN index on multiple columns, here is another way to get there using text().
from sqlalchemy import Column, Index, Integer, String, text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import func

Base = declarative_base()

def to_tsvector_ix(*columns):
    s = " || ' ' || ".join(columns)
    return func.to_tsvector('english', text(s))

class Example(Base):
    __tablename__ = 'examples'

    id = Column(Integer, primary_key=True)
    atext = Column(String)
    btext = Column(String)

    __table_args__ = (
        Index(
            'ix_examples_tsv',
            to_tsvector_ix('atext', 'btext'),
            postgresql_using='gin'
        ),
    )
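For the index to actually be used, a query has to reproduce the indexed expression. A hedged sketch (session setup assumed; note that + on String columns renders as || on PostgreSQL):

from sqlalchemy import func

# Hedged sketch: rebuilds to_tsvector('english', atext || ' ' || btext),
# matching the expression the GIN index was declared on
results = (
    session.query(Example)
    .filter(func.to_tsvector('english', Example.atext + ' ' + Example.btext).match('hello'))
    .all()
)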
Thanks for this question and the answers.
I'd like to add a bit more for people using Alembic to manage schema versions with autogenerate, since an index created this way does not seem to be detected.
You might end up writing your own migration script by hand, which could look like:
"""add fts idx
Revision ID: e3ce1ce23d7a
Revises: 079c4455d54d
Create Date:
"""
# revision identifiers, used by Alembic.
revision = 'e3ce1ce23d7a'
down_revision = '079c4455d54d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('idx_content_fts', 'table_name',
[sa.text("to_tsvector('english', content)")],
postgresql_using='gin')
def downgrade():
op.drop_index('idx_content_fts')
It has been answered already by @sharez and @benvc, but I needed to make it work with weights. This is how I did it, based on their answers:
from sqlalchemy import Column, func, Index, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.operators import op

CONFIG = 'english'

Base = declarative_base()

def create_tsvector(*args):
    field, weight = args[0]
    exp = func.setweight(func.to_tsvector(CONFIG, field), weight)
    for field, weight in args[1:]:
        exp = op(exp, '||', func.setweight(func.to_tsvector(CONFIG, field), weight))
    return exp

class Example(Base):
    __tablename__ = 'example'

    id = Column(Integer, primary_key=True)  # declarative mappings require a primary key
    foo = Column(String)
    bar = Column(String)

    __ts_vector__ = create_tsvector(
        (foo, 'A'),
        (bar, 'B')
    )

    __table_args__ = (
        Index('my_index', __ts_vector__, postgresql_using='gin'),
    )
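A hedged sketch of how the weighted vector might then be used for ranked search (session setup assumed; the search string is a placeholder):

from sqlalchemy import desc, func

# Hedged sketch: filter on the weighted vector, then order by ts_rank so
# 'A'-weighted hits on foo rank above 'B'-weighted hits on bar
results = (
    session.query(Example)
    .filter(Example.__ts_vector__.match('something'))
    .order_by(desc(func.ts_rank(Example.__ts_vector__, func.plainto_tsquery(CONFIG, 'something'))))
    .all()
)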
Previous answers here were helpful for pointing in the right direction.
Below is a distilled & simplified approach using the ORM and the TSVectorType helper from sqlalchemy-utils (which is quite basic and can simply be copy/pasted to avoid an external dependency if needed: https://sqlalchemy-utils.readthedocs.io/en/latest/_modules/sqlalchemy_utils/types/ts_vector.html).
Define a TSVECTOR column (TSVectorType) in your ORM model (declarative), populated automatically from the source text field(s):
import sqlalchemy as sa
from sqlalchemy_utils.types.ts_vector import TSVectorType
# ^-- https://sqlalchemy-utils.readthedocs.io/en/latest/_modules/sqlalchemy_utils/types/ts_vector.html

class MyModel(Base):
    __tablename__ = 'mymodel'

    id = sa.Column(sa.Integer, primary_key=True)
    content = sa.Column(sa.String, nullable=False)
    content_tsv = sa.Column(
        TSVectorType("content", regconfig="english"),
        sa.Computed("to_tsvector('english', \"content\")", persisted=True))
    # ^-- equivalent SQL:
    # COLUMN content_tsv TSVECTOR GENERATED ALWAYS AS (to_tsvector('english', "content")) STORED;

    __table_args__ = (
        # Indexing the TSVECTOR column
        sa.Index("idx_mymodel_content_tsv", content_tsv, postgresql_using="gin"),
    )
For additional details on querying using ORM, see https://stackoverflow.com/a/73999486/11750716 (there is an important difference between SQLAlchemy 1.4 and SQLAlchemy 2.0).
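As a hedged usage sketch (session setup assumed), TSVectorType columns support match(), which emits the @@ operator against the stored vector:

# Hedged sketch: full text search against the generated column
results = session.query(MyModel).filter(MyModel.content_tsv.match('search terms')).all()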
I need some help converting this query into SQLAlchemy.
select field from table t1 join table t2 on t1.detail_id = t2.id join table t3 on t3.id = t2.rate_id where t2.name = 'fred' and t3.rate_type = 'Custom' and t3.description = 'Default';
I have been able to convert inner join queries with two tables, but need some help with this one.
Appreciate your help. TIA.
If a simple SQL query is enough, you can try:
session.execute("SELECT t1.field AS t1_field "
                "FROM t1 JOIN t2 ON t1.detail_id = t2.id JOIN t3 ON t2.rate_id = t3.id "
                "WHERE t2.name = :name AND t3.rate_type = :rate_type AND t3.description = :description",
                {'name': 'fred', 'rate_type': 'Custom', 'description': 'Default'})
But if you want to use the SQLAlchemy declarative base, then the query would look like:
results = session.query(T1.field).join(T2, T1.detail_id == T2.id).join(T3, T2.rate_id == T3.id).\
    filter(T2.name == 'fred').\
    filter(T3.rate_type == 'Custom').\
    filter(T3.description == 'Default').all()
For the following models:
from sqlalchemy import create_engine, Integer, ForeignKey, String, Column
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class T1(Base):
    __tablename__ = 't1'

    id = Column(Integer, primary_key=True)
    field = Column(String)
    detail_id = Column(Integer, ForeignKey("t2.id"))

class T2(Base):
    __tablename__ = 't2'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    rate_id = Column(Integer, ForeignKey("t3.id"))

class T3(Base):
    __tablename__ = 't3'

    id = Column(Integer, primary_key=True)
    rate_type = Column(String)
    description = Column(String)
I hope it helps.
SQLAlchemy provides both an ORM and a SQL Expression Language for working with the database, and you can also run raw SQL directly.
(1) Raw SQL query. Sample code:
engine = create_engine(...)
q = 'SELECT foo FROM t_bar WHERE col_name=:v_parameters'
rs = engine.execute(sqlalchemy.text(q), v_parameters=your_actual_value)
Check execute and basic usage. Also take a look at ResultProxy to understand how to operate on the returned result.
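For instance, a minimal hedged sketch of consuming the ResultProxy from the snippet above:

# Hedged sketch: a ResultProxy is iterable; rows support access by column name
for row in rs:
    print(row['foo'])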
(2) ORM. If you want to use the ORM, you first have to define models and mapped classes. Sample code:
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import String, Integer
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Father(Base):
    __tablename__ = 'father'

    id = Column(Integer, primary_key=True)
    name = Column(String(31), unique=True, nullable=False)

    # Relationship attributes
    sons = relationship('Son',
                        passive_deletes=True,
                        back_populates='father')

class Son(Base):
    __tablename__ = 'son'

    id = Column(Integer, primary_key=True)
    name = Column(String(31), unique=True, nullable=False)

    # Foreign keys
    p_id = Column(Integer, ForeignKey('father.id',
                                      ondelete='CASCADE',
                                      onupdate='CASCADE'))

    # Relationship attributes
    father = relationship('Father',
                          passive_deletes=True,
                          back_populates='sons')
Then you can do ORM query operations:
session.query(Father).join(Father.sons).filter(Son.name == 'Sam')
is equivalent to the SQL query
SELECT father.id, father.name FROM father JOIN son ON father.id=son.p_id WHERE son.name='Sam'.
Please check the ORM mapper and ORM Query documentation for more information.
For your application: if you have mapped all of your tables, then you can use the ORM way. If you do not need ORM features, you can just use raw SQL queries.
Thanks.
I get an incorrect record set when adding an aggregate function like func.sum on a dynamic relationship. I have listed sample code below to demonstrate this.
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
    relationship,
    scoped_session,
    sessionmaker,
    backref
)
from sqlalchemy import (
    create_engine,
    Table,
    Column,
    Integer,
    String,
    ForeignKey,
    func
)
from zope.sqlalchemy import ZopeTransactionExtension
import transaction

Base = declarative_base()
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))

class User(Base):
    __tablename__ = 'users'

    id = Column(Integer, primary_key=True)
    userid = Column(String(15), unique=True, nullable=False)
    article_list = relationship("Article", backref="user", lazy="dynamic")

class Tag(Base):
    __tablename__ = 'tags'

    id = Column(Integer, primary_key=True)
    name = Column(String(25), nullable=False, unique=True)

class Article(Base):
    __tablename__ = 'articles'

    id = Column(Integer, primary_key=True)
    title = Column(String(25), nullable=False)
    duration = Column(Integer)
    user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    tags = relationship('Tag', secondary="tag_map",
                        backref=backref("article_list", lazy="dynamic"))

tag_map_table = Table(
    'tag_map', Base.metadata,
    Column('tag_id', Integer, ForeignKey('tags.id'), nullable=False),
    Column('article_id', Integer, ForeignKey('articles.id'), nullable=False))

engine = create_engine('sqlite:///tag_test.sqlite')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)

with transaction.manager:
    t1 = Tag(name='software')
    t2 = Tag(name='hardware')

    john = User(userid='john')

    a1 = Article(title='First article', duration=300)
    a1.user = john
    a1.tags.append(t1)
    a1.tags.append(t2)
    DBSession.add(a1)

    a2 = Article(title='Second article', duration=50)
    a2.user = john
    a2.tags.append(t1)
    a2.tags.append(t2)
    DBSession.add(a2)
As seen in the code above, I have added both tags to both articles. Now I want to query the articles written by the user 'john', grouped by tag, and along with that I want the sum of the durations per tag.
john = DBSession.query(User).filter(User.userid == 'john').first()
res = john.article_list.join(Article.tags).add_column(
    func.sum(Article.duration)).group_by(Tag.id)
for article, tsum in res:
    print("Article : %s, Sum duration : %d" % (article.title, tsum))
The query generated for res is
SELECT articles.id AS articles_id, articles.title AS articles_title, articles.duration AS articles_duration, articles.user_id AS articles_user_id, sum(articles.duration) AS sum_1
FROM articles JOIN tag_map AS tag_map_1 ON articles.id = tag_map_1.article_id JOIN tags ON tags.id = tag_map_1.tag_id
WHERE :param_1 = articles.user_id GROUP BY tags.id
which, when executed directly on the SQLite database, yields two rows corresponding to the two tags:
2|Second article|50|1|350
2|Second article|50|1|350
Whereas the results returned by SQLAlchemy contain only one row:
Article : Second article, Sum duration : 350
But if I add an extra column for the tag name to the AppenderQuery result:
res = john.article_list.join(Article.tags).add_column(Tag.name).add_column(
    func.sum(Article.duration)).group_by(Tag.id)
for article, tag_name, tsum in res:
    print("Article : %s, Tag : %s, Sum duration : %d" % (
        article.title, tag_name, tsum))
I get the proper results:
Article : Second article, Tag : software, Sum duration : 350
Article : Second article, Tag : hardware, Sum duration : 350
So, what is the right way of using aggregate functions on an AppenderQuery object in order to get categorized results?
I have defined a few tables in Pyramid like this:
# coding: utf-8
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Integer, SmallInteger, Float, DateTime, ForeignKey, ForeignKeyConstraint, String, Column
from sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref
from zope.sqlalchemy import ZopeTransactionExtension

DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
class Codes(Base):
    __tablename__ = 'Code'
    __table_args__ = {u'schema': 'Locations'}

    id = Column(Integer, nullable=False)
    code_str = Column(String(9), primary_key=True)
    name = Column(String(100))

    incoming = relationship(u'Voyages', primaryjoin='Voyages.call == Codes.code_str', backref=backref('Code'))
class Locations(Base):
    __tablename__ = 'Location'
    __table_args__ = {u'schema': 'Locations'}

    unit_id = Column(ForeignKey(u'Structure.Definition.unit_id', ondelete=u'RESTRICT', onupdate=u'CASCADE'), primary_key=True, nullable=False)
    timestamp = Column(DateTime, primary_key=True, nullable=False)
    longitude = Column(Float)
    latitude = Column(Float)
class Voyages(Base):
    __tablename__ = 'Voyage'
    __table_args__ = (ForeignKeyConstraint(['unit_id', 'voyage_id'], [u'Locations.Voyages.unit_id', u'Locations.Voyages.voyage_id'], ondelete=u'RESTRICT', onupdate=u'CASCADE'),
                      {u'schema': 'Locations'})

    uid = Column(Integer, primary_key=True)
    unit_id = Column(Integer)
    voyage_id = Column(Integer)
    departure = Column(ForeignKey(u'Locations.Code.code_str', ondelete=u'RESTRICT', onupdate=u'CASCADE'))
    call = Column(ForeignKey(u'Locations.Code.code_str', ondelete=u'RESTRICT', onupdate=u'CASCADE'))
    departure_date = Column(DateTime)

    voyage_departure = relationship(u'Codes', primaryjoin='Voyages.departure == Codes.code_str')
    voyage_call = relationship(u'Codes', primaryjoin='Voyages.call == Codes.code_str')
class Definitions(Base):
    __tablename__ = 'Definition'
    __table_args__ = {u'schema': 'Structure'}

    unit_id = Column(Integer, primary_key=True)
    name = Column(String(90))
    type = Column(ForeignKey(u'Structure.Type.id', ondelete=u'RESTRICT', onupdate=u'CASCADE'))

    locations = relationship(u'Locations', backref=backref('Definition'))
    dimensions = relationship(u'Dimensions', backref=backref('Definition'))
    types = relationship(u'Types', backref=backref('Definition'))
    voyages = relationship(u'Voyages', backref=backref('Definition'))
class Dimensions(Base):
    __tablename__ = 'Dimension'
    __table_args__ = {u'schema': 'Structure'}

    unit_id = Column(ForeignKey(u'Structure.Definition.unit_id', ondelete=u'RESTRICT', onupdate=u'CASCADE'), primary_key=True, nullable=False)
    length = Column(Float)

class Types(Base):
    __tablename__ = 'Type'
    __table_args__ = {u'schema': 'Structure'}

    id = Column(SmallInteger, primary_key=True)
    type_name = Column(String(255))
    type_description = Column(String(255))
What I am trying to do here is find a specific row in the Codes table (filtered by code_str) and get all related tables in return, under the conditions that the Location table returns only its latest row by timestamp, the Voyage table returns only its latest row by departure, and everything carries the related information from the Definitions table.
I started to create a query from scratch and came up with something like this:
string_to_search = request.matchdict.get('code')

sub_dest = DBSession.query(func.max(Voyages.departure).label('latest_voyage_timestamp'), Voyages.unit_id, Voyages.call.label('destination_call')).\
    filter(Voyages.call == string_to_search).\
    group_by(Voyages.unit_id, Voyages.call).\
    subquery()

query = DBSession.query(Codes, Voyages).\
    join(sub_dest, sub_dest.c.destination_call == Codes.code_str).\
    outerjoin(Voyages, sub_dest.c.latest_voyage_timestamp == Voyages.departure_date)
but I have noticed that when I iterate through the results (like for code, voyage in query), I am actually iterating over every Voyage returned. In theory that is not a big problem for me, but I am trying to construct a JSON response with basic information from the Codes table that includes all related Voyages (if there are any at all).
For example:
code_data = {}
all_units = []

for code, voyage in query:
    if code_data is not {}:
        code_data = {
            'code_id': code.id,
            'code_str': code.code_str,
            'code_name': code.name,
        }
    single_unit = {
        'unit_id': voyage.unit_id,
        'unit_departure': str(voyage.departure_date) if voyage.departure_date else None,
    }
    all_units.append(single_unit)

return {
    'code_data': exception.message if exception else code_data,
    'voyages': exception.message if exception else all_units,
}
Now, this seems a bit wrong because I don't like rewriting code_data on each loop iteration; that is why I put the if code_data is not {} line there. But I suppose it would be much more logical to iterate in a way similar to this:
for code in query:
    code_data = {
        'code_id': code.id,
        'code_str': code.code_str,
        'code_name': code.name,
    }
    for voyage in code.voyages:
        single_unit = {
            'unit_id': voyage.unit_id,
            'unit_departure': str(voyage.departure) if voyage.departure else None,
        }
        all_units.append(single_unit)

return {
    'code_data': exception.message if exception else code_data,
}
So: to get only a single Code in return (since I queried the db for that specific Code), which would then have all Voyages related to it as a nested value, and, in each Voyage, all other information related to the Definition of the particular Unit...
Is my approach good in the first place, and how could I construct my query in order to iterate over it in this second way?
I'm using Python 2.7.6, SQLAlchemy 0.9.7 and Pyramid 1.5.1 with a Postgres database.
Thanks!
Try changing the outer query like so:
query = DBSession.query(Codes).options(contains_eager('incoming')).\
    join(sub_dest, sub_dest.c.destination_call == Codes.code_str).\
    outerjoin(Voyages, sub_dest.c.latest_voyage_timestamp == Voyages.departure_date)
In case of problems, try calling the options(...) part like so:
(...) .options(contains_eager(Codes.incoming)). (...)
This should result in a single Codes instance being returned with Voyages objects accessible via the relationship you've defined (incoming), so you could proceed with:
results = query.all()
for code in results:
    print code
    # do something with code.incoming

# Actually, you should get only one code, so if this proves to work you should
# use query.one(), so that an exception is thrown if anything other than a
# single Code is returned.
Of course, you need an import, e.g.: from sqlalchemy.orm import contains_eager
Suppose we have a PostgreSQL database with two tables A, B.
table A columns: id, name
table B columns: id, name, array_a
The column array_a in table B contains a variable length array of ids from table A. In SQLAlchemy we have two classes that model those tables, say class A and B.
The following works fine to get all the objects A that are referenced in an object B:
session.query(A).join(B, A.id == func.any(B.array_a)).filter(B.id == <id>).all()
How can we create a relationship in B referencing the objects A corresponding to the array? I tried column comparators using func.any as above, but it complains that ANY(array_a) is not a column in the model. Specifying the primaryjoin condition as above doesn't seem to cut it either.
This anti-pattern is called "jaywalking", and PostgreSQL's powerful type system makes it very tempting. You should be using an association table:
CREATE TABLE table_a (
    id SERIAL PRIMARY KEY,
    name VARCHAR
);

CREATE TABLE table_b (
    id SERIAL PRIMARY KEY,
    name VARCHAR
);

CREATE TABLE a_b (
    a_id INTEGER REFERENCES table_a(id),
    b_id INTEGER REFERENCES table_b(id),
    PRIMARY KEY (a_id, b_id)
);
Which is mapped:
from sqlalchemy import *
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import *

Base = declarative_base()

a_b_table = Table("a_b", Base.metadata,
                  Column("a_id", Integer, ForeignKey("table_a.id"), primary_key=True),
                  Column("b_id", Integer, ForeignKey("table_b.id"), primary_key=True))

class A(Base):
    __tablename__ = "table_a"
    id = Column(Integer, primary_key=True)
    name = Column(String)

class B(Base):
    __tablename__ = "table_b"
    id = Column(Integer, primary_key=True)
    name = Column(String)

    a_set = relationship(A, secondary=a_b_table, backref="b_set")
example:
>>> print Query(A).filter(A.b_set.any(B.name == "foo"))
SELECT table_a.id AS table_a_id, table_a.name AS table_a_name
FROM table_a
WHERE EXISTS (SELECT 1
FROM a_b, table_b
WHERE table_a.id = a_b.a_id AND table_b.id = a_b.b_id AND table_b.name = :name_1)
If you are stuck with the ARRAY column, your best bet is to use an alternate selectable that "looks" like a proper association table.
from sqlalchemy import *
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import *

Base = declarative_base()

class A(Base):
    __tablename__ = "table_a"
    id = Column(Integer, primary_key=True)
    name = Column(String)

class B(Base):
    __tablename__ = "table_b"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    array_a = Column(postgresql.ARRAY(Integer))

a_b_selectable = select([func.unnest(B.array_a).label("a_id"),
                         B.id.label("b_id")]).alias()

A.b_set = relationship(B, secondary=a_b_selectable,
                       primaryjoin=A.id == a_b_selectable.c.a_id,
                       secondaryjoin=a_b_selectable.c.b_id == B.id,
                       viewonly=True)

B.a_set = relationship(A, secondary=a_b_selectable,
                       primaryjoin=A.id == a_b_selectable.c.a_id,
                       secondaryjoin=a_b_selectable.c.b_id == B.id,
                       viewonly=True)
which gives you:
>>> print Query(A).filter(A.b_set.any(B.name == "foo"))
SELECT table_a.id AS table_a_id, table_a.name AS table_a_name
FROM table_a
WHERE EXISTS (SELECT 1
FROM (SELECT unnest(table_b.array_a) AS a_id, table_b.id AS b_id
FROM table_b) AS anon_1, table_b
WHERE table_a.id = anon_1.a_id AND anon_1.b_id = table_b.id AND table_b.name = :name_1)
And obviously, since there's no real table there, viewonly=True is necessary, and you can't get the nice, dynamic objecty goodness you would have if you had avoided jaywalking.
Alternatively, you can simply declare the join condition explicitly, like below:
class A(Base):
    __tablename__ = "table_a"
    id = Column(Integer, primary_key=True)
    name = Column(String)

class B(Base):
    __tablename__ = "table_b"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    array_a = Column(postgresql.ARRAY(Integer))

    a_ids = relationship('A', primaryjoin='A.id == any_(foreign(B.array_a))', uselist=True)
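A hedged usage sketch (session setup assumed); since the collection is derived from the array rather than a real association table, treat it as read-only:

# Hedged sketch: a_ids loads the A rows whose ids appear in b.array_a
b = session.query(B).first()
for a in b.a_ids:
    print(a.name)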