I am working on a DELETE request. All of my other functions work, but this one does not. It does run, and in Insomnia it looks like the row was deleted, but when I fetch all rows it is still there.
This is from the __init__.py in my model folder:
class DatabaseConnector:
    @classmethod
    def get_conn_cur(cls):
        cls.conn = psycopg2.connect(**configs)
        cls.cur = cls.conn.cursor()

    @classmethod
    def commit_and_close(cls):
        cls.conn.commit()
        cls.cur.close()
        cls.conn.close()
This is my anime_model.py file:
class Anime(DatabaseConnector):
    def __init__(self, anime: str, released_date: str, seasons: int) -> None:
        self.anime = anime.lower().title()
        self.released_date = released_date
        self.seasons = seasons

    @classmethod
    def remove_an_anime(cls, anime_id: int):
        cls.get_conn_cur()
        cls.create_a_table()

        query = f"DELETE FROM animes WHERE id={anime_id} RETURNING *"
        cls.cur.execute(query)
        anime = cls.cur.fetchall()

        cls.cur.close()
        cls.conn.close()

        return anime
This is from my controller folder, anime_controller.py:
def delete_an_anime(anime_id):
    anime_to_delete = Anime.remove_an_anime(anime_id)

    if not anime_to_delete:
        return {"error": f"id {anime_id} not found"}, HTTPStatus.NOT_FOUND

    serialized_anime = dict(zip(anime_columns, anime_to_delete))

    return serialized_anime, HTTPStatus.ACCEPTED
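For context, psycopg2 does not autocommit: a transaction that is never committed is rolled back when the connection closes, which matches the symptom above (the DELETE appears to succeed but the row is still there). A minimal sketch of the same method with a commit before closing, reusing the names from the question; treat it as an illustration rather than a verified fix:

    @classmethod
    def remove_an_anime(cls, anime_id: int):
        cls.get_conn_cur()

        # Parameterized query: avoids interpolating anime_id into the SQL string
        query = "DELETE FROM animes WHERE id = %s RETURNING *"
        cls.cur.execute(query, (anime_id,))
        anime = cls.cur.fetchall()

        # Persist the delete before closing; without this the transaction is rolled back
        cls.conn.commit()
        cls.cur.close()
        cls.conn.close()

        return anime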
I have the following function
import requests
children_dict = {}
def get_list_of_children(base_url, username, password, folder="1"):
    token = get_token()
    url = f"{base_url}/unix/repo/folders/{folder}/list"
    json = requests_json(url, token)
    for obj in json["list"]:
        if obj['name'] == 'MainFolder':
            folderId = obj['id']
            url_parent = f"{base_url}/unix/repo/folders/{folderId}/list"
            json_parent = requests_json(url_parent, token)
            for obj_child in json_parent['list']:
                if obj_child['folder'] == True:
                    folder_grand_child_id = obj_child['id']
                    url_grand_child = f"{base_url}/unix/repo/folders/{folder_grand_child_id}/list"
                    json_grand_child = requests_json(url_grand_child, token)
                    for obj_grand_child in json_grand_child["list"]:
                        if obj_grand_child['name'] == 'SubFolder':
                            folder_grand_grand_child = obj_grand_child['id']
                            url_grand_grand_child = f"{base_url}/unix/repo/folders/{folder_grand_grand_child}/list"
                            json_grand_grand_child = requests_json(url_grand_grand_child, token)
                            for obj_grand_grand_child in json_grand_grand_child["list"]:
                                if obj_grand_grand_child['name'] == 'MainTasks':
                                    folder_grand_grand_grand_child = obj_grand_grand_child['id']
                                    url_grand_grand_grand_child = f"{base_url}/unix/repo/folders/{folder_grand_grand_grand_child}/list"
                                    json_grand_grand_grand_child = requests_json(url_grand_grand_grand_child, token)
                                    for obj_grand_grand_grand_child in json_grand_grand_grand_child["list"]:
                                        children_dict[obj_grand_grand_grand_child['id']] = obj_grand_grand_grand_child['name']
    return children_dict
What I am trying to accomplish here is to make repeated API calls to traverse an HTTP folder structure and get the list of files in the last directory.
The function works as intended, but SonarLint is throwing the error below:
Refactor this function to reduce its Cognitive Complexity from 45 to the 15 allowed. [+9 locations]sonarlint(python:S3776)
Is there a better way to handle this function?
Can anyone refactor this? Pointing me in the right direction will do.
This isn't a complete solution, but to answer your question "how can I simplify this" more generally, you need to look for repeated patterns in your code and generalize them into a function. Perhaps it's a function you can call recursively, or in a loop. For example, in that deeply-nested statement of yours it's just the same pattern over and over again like:
url = f"{base_url}/unix/repo/folders/{folder}/list"
json = requests_json(url, token)
for obj in json["list"]:
    if obj['name'] == '<some folder name>':
        folderId = obj['id']
        # ...repeat...
So try generalizing this into a loop, maybe, like:
url_format = "{base_url}/unix/repo/folders/{folder_id}/list"
folder_hierarchy = ['MainFolder', 'SubFolder', 'MainTasks']
folder_id = '1'  # this was the argument passed to your function

for subfolder in folder_hierarchy:
    url = url_format.format(base_url=base_url, folder_id=folder_id)
    folder_json = requests_json(url, token)
    for obj in folder_json['list']:
        if obj['name'] == subfolder:
            folder_id = obj['id']
            break
    # Now the pattern repeats for the next level of the hierarchy but
    # starting with the new folder_id
This is just schematic and you may need to generalize further, but it's one idea.
If your goal is to traverse a more complicated hierarchy you might want to look into tree-traversal algorithms.
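As a rough illustration of that tree-traversal idea, here is a recursive sketch over the same folder API. It reuses requests_json and get_token from the question; the helper name walk_folders is made up, and it matches folders strictly by name, so the level in the original code that descends into every subfolder would need a wildcard predicate instead:

def walk_folders(base_url, token, folder_id, hierarchy):
    """Follow the named folder hierarchy recursively and return {id: name} of the leaves."""
    url = f"{base_url}/unix/repo/folders/{folder_id}/list"
    entries = requests_json(url, token)["list"]

    # Base case: no more folder names to match, so these entries are the files we want
    if not hierarchy:
        return {entry['id']: entry['name'] for entry in entries}

    children = {}
    next_name, rest = hierarchy[0], hierarchy[1:]
    for entry in entries:
        # Descend only into entries matching the next expected folder name
        if entry['name'] == next_name:
            children.update(walk_folders(base_url, token, entry['id'], rest))
    return children

# children = walk_folders(base_url, get_token(), "1", ['MainFolder', 'SubFolder', 'MainTasks'])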
There's plenty of repeating code. Once you identify the repeating patterns, you can extract them to the classes and functions. In particular, I find it useful to isolate all the web API logic from the rest of the code:
class Client:
    def __init__(self, base_url, token):
        self.base_url = base_url
        self.token = token

    def list_folder(self, folder_id):
        return requests_json(
            f'{self.base_url}/unix/repo/folders/{folder_id}/list', self.token
        )['list']

    def get_subfolders(self, parent_id=1):
        return [c for c in self.list_folder(parent_id) if c['folder']]

    def get_subfolder(self, parent_id, name):
        children = self.list_folder(parent_id)
        for child in children:
            if child['name'] == name:
                return child
        return None

    def resolve_path(self, path, root_id=1):
        parent_id = root_id
        for p in path:
            current = self.get_subfolder(parent_id, p)
            if not current:
                return None
            parent_id = current['id']
        return current
Now you can use the class above to simplify the main code:
client = Client(base_url, token)
for folder in client.get_subfolders():
    child = client.resolve_path(('SubFolder', 'MainTasks'), root_id=folder['id'])
    if child:
        # do the rest of the stuff
        ...
The code above is not guaranteed to work as is, just an illustration of the idea.
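To tie this back to the original function, a sketch of get_list_of_children rebuilt on top of the Client class above. It assumes get_token and requests_json behave as in the question; the unused username/password parameters are kept only to preserve the original signature:

def get_list_of_children(base_url, username, password, folder="1"):
    client = Client(base_url, get_token())
    children = {}

    # Level 1: find MainFolder by name under the starting folder
    main = client.get_subfolder(folder, 'MainFolder')
    if not main:
        return children

    # Level 2: walk every subfolder, then descend SubFolder -> MainTasks
    for sub in client.get_subfolders(main['id']):
        tasks = client.resolve_path(('SubFolder', 'MainTasks'), root_id=sub['id'])
        if tasks:
            # Collect the files in the final directory as {id: name}
            for entry in client.list_folder(tasks['id']):
                children[entry['id']] = entry['name']
    return children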
I can't really test it, but I'd make it like the following in order to easily build many levels of repeating code.
class NestedProcessing:
    def __init__(self, base_url):
        self.base_url = base_url
        self.token = get_token()
        self.obj_predicates = []

    def next_level_predicate(self, obj_predicate):
        self.obj_predicates.append(obj_predicate)
        return self

    def final_action(self, obj_action):
        self.obj_action = obj_action
        return self

    def process(self, first_folder_id):
        self.process_level(0, first_folder_id)

    def process_level(self, index, folder_id):
        obj_is_good = self.obj_predicates[index]
        url = f"{self.base_url}/unix/repo/folders/{folder_id}/list"
        json = requests_json(url, self.token)
        for obj in json["list"]:
            if index == len(self.obj_predicates) - 1:  # last level
                self.obj_action(obj)
            elif obj_is_good(obj):
                self.process_level(index + 1, obj['id'])
def get_list_of_children(base_url, username, password, folder="1"):
    children_dict = {}
    (NestedProcessing(base_url)
        .next_level_predicate(lambda obj: obj['name'] == 'MainFolder')
        .next_level_predicate(lambda obj: obj['folder'] == True)
        .next_level_predicate(lambda obj: obj['name'] == 'SubFolder')
        .next_level_predicate(lambda obj: obj['name'] == 'MainTasks')
        .final_action(lambda obj, storage=children_dict: storage.update({obj['id']: obj['name']}))
        .process(folder))
    return children_dict
I need to build my BuildObject using data extracted from CSV file columns.
class BuildObject(ObjectID):
    def __init__(self, ObjectID, ObjectName, ObjectPrice, ObjectLocation, ObjectColour, ObjectAge, ObjectTag):
        self.ObjectID = ObjectID
        self.ObjectName = ObjectName

def main():
    with open(filename1, "r") as csv1, open(filename2, "r") as csv2:
        csvReader1 = csv.DictReader(csv1)
        csvReader2 = csv.DictReader(csv2)
        csvList = []
        for row1, row2 in zip(csvReader1, csvReader2):
            csvList.append((row2["ObjectName"], row1["ObjectId"], row1["ObjectPrice"]))
        return csvList
Comment: My concern with this answer is that it will work fine provided the CSV files have exactly the same ObjectIDs in the same order - but what will happen if an ObjectID/object is missing from only one of the CSV files?
Therefore, you can't use zip(csvReader1, csvReader2); you need random access to a Data_Record using the ObjectID as key/index.
As you mentioned large amounts of data, I would recommend going with SQL.
If you want to do it using Python objects, change the following:
def __init__(self):
    self._data_store = {}

@data_store.setter
def data_store(self, data):
    ...
    self._data_store[record['ObjectID']] = record
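To illustrate keying records by ObjectID rather than relying on row order, here is a small self-contained sketch. The file names and column names are placeholders taken from the question, not a drop-in replacement for the code above:

import csv

def load_by_id(fname, key="ObjectID"):
    # Build a dict keyed by ObjectID so rows can be matched regardless of order
    with open(fname, newline="") as f:
        return {row[key]: row for row in csv.DictReader(f)}

records1 = load_by_id("file1.csv")
records2 = load_by_id("file2.csv")

# Only IDs present in both files are merged; IDs missing from one file simply drop out
merged = {
    object_id: {**records1[object_id], **records2[object_id]}
    for object_id in records1.keys() & records2.keys()
}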
Question: The one remaining topic would be to create a BuildObject for every unique itemID using the data from the CSV files and the SQL query.
Checking your code, I got the following error:
class BuildObject(ObjectID):
NameError: name 'ObjectID' is not defined
Why do you inherit from ObjectID?
Where is this class defined?
Consider the following:
class Data_Record():
    """
    This class object holds all data for ONE record
    """
    def __init__(self, ObjectID, ObjectName):
        self.ObjectID = ObjectID
        self.ObjectName = ObjectName
        # ... (omitted for brevity)

class Data_Store():
    """
    This class object handles Data_Record objects, reading from csv or sql or anywhere
    """
    def __init__(self):
        # List to hold all Data_Record objects
        self._data_store = []

    # Read-only access to the Data_Record objects
    @property
    def data_store(self):
        return self._data_store

    # Add ONE Data_Record from either csv or sql or anywhere
    @data_store.setter
    def data_store(self, data):
        # Condition on type(data)
        if isinstance(data, dict):
            record = Data_Record(**data)
        elif isinstance(data, (list, tuple)):
            record = Data_Record(*data)
        else:
            raise ValueError("Data of type({}) are not supported!".format(type(data)))
        self._data_store.append(record)

    # Method to read from csv
    def read_csv(self, fname1, fname2):
        # ... (omitted for brevity)
        csvReader1, csvReader2 = ([], [])
        for csv1, csv2 in zip(csvReader1, csvReader2):
            self.data_store = (csv2["ObjectName"], csv1["ObjectId"])

    # Method to read from sql
    def read_sql(self, sql, query):
        result = sql.query(query)
        for record in result:
            self.data_store = record
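A brief usage sketch of the class above (not from the original answer), showing records fed in both as a dict and as an ordered sequence; the setter hides which form the data arrived in:

store = Data_Store()

# Each assignment goes through the setter and appends one Data_Record
store.data_store = {"ObjectID": 1, "ObjectName": "Chair"}
store.data_store = (2, "Table")

# The property getter returns the list of Data_Record objects
for record in store.data_store:
    print(record.ObjectID, record.ObjectName)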
Alternative: without @property/getter/setter.
Here the read_...() functions have to know how to add a new Data_Record object to self.data_store. Note: self.data_store is now a public attribute.
If you decide later on not to store the records in memory, you have to rewrite both read_...() functions.
class Data_Record():
    def __init__(self, data=None):
        # Condition on type(data)
        if isinstance(data, dict):
            self.ObjectID = data['ObjectID']
            self.ObjectName = data['ObjectName']
        elif isinstance(data, list):
            # List has to be in a predefined order,
            # e.g. ObjectID == index 0, ObjectName == index 1, etc.
            self.ObjectID = data[0]
            self.ObjectName = data[1]
        else:
            self.ObjectID = None
            self.ObjectName = None

class Data_Store():
    def __init__(self):
        self.data_store = []

    def read_csv(self, fname1, fname2):
        # ... (csvReader1/csvReader2 created from the files, omitted for brevity)
        for csv1, csv2 in zip(csvReader1, csvReader2):
            self.data_store.append(Data_Record([csv1["ObjectId"], csv2["ObjectName"]]))

    def read_sql(self, query):
        for record in SQL.query(query):
            self.data_store.append(Data_Record(record))
I'm having some trouble with an integration test. I'm using Python 3.5, SQLAlchemy 1.2.0b3, and the latest Docker image of PostgreSQL. I wrote this test:
# tests/integration/usecases/test_users_usecase.py
class TestGetUsersUsecase(unittest.TestCase):
    def setUp(self):
        Base.metadata.reflect(_pg)
        Base.metadata.drop_all(_pg)
        Base.metadata.create_all(_pg)
        self._session = sessionmaker(bind=_pg, autoflush=True, autocommit=False, expire_on_commit=True)
        self.session = self._session()
        self.session.add(User(id=1, username='user1'))
        self.session.commit()
        self.pg = PostgresService(session=self.session)

    def test_get_user(self):
        expected = User(id=1, username='user1')
        boilerplates.get_user_usecase(storage_service=self.pg, id=1, expected=expected)

# tests/boilerplates/usecases/user_usecases.py
def get_user_usecase(storage_service, id, expected):
    u = GetUser(storage_service=storage_service)
    actual = u.apply(id=id)
    assert expected == actual
In the use case I did the following:
# usecases/users.py
class GetUser(object):
    """
    Usecase for getting user from storage service by Id
    """
    def __init__(self, storage_service):
        self.storage_service = storage_service

    def apply(self, id):
        user = self.storage_service.get_user_by_id(id=id)
        if user is None:
            raise UserDoesNotExists('User with id=\'%s\' does not exists' % id)
        return user
storage_service.get_user_by_id looks like:
# infrastructure/postgres.py (method of Postgres class)
def get_user_by_id(self, id):
    from anna.domain.user import User
    return self.session.query(User).filter(User.id == id).one_or_none()
And it does not work in my integration test. But if I add print(actual) in the test before the assert, everything is OK. I thought my test was bad; I tried many variants and none of them work. I also tried returning a generator from storage_service.get_user_by_id() and that does not work either. What did I do wrong? It only works if print() is called in the test.
I have an instance of a class, within which I'm trying to use a method defined in the parent.
The code I have produces the following error:
if WordTrigger.isWordIn(story.getTitle()): return True:
TypeError: unbound method isWordIn() must be called with WordTrigger instance as first argument (got str instance instead)
Can anyone steer me in the right direction? i.e. should I be using super() (the Python 2.7 version)? If so, how?
class NewsStory(object):
    """
    Data structure for RSS data feed collector
    """
    def setUp(self):
        pass

    def __init__(self, cguid, ctitle, csubject, csummary, clink):
        # A globally unique identifier for this news story.
        self.cguid = cguid
        # The news story's headline.
        self.ctitle = ctitle
        # A subject tag for this story (e.g. 'Top Stories', or 'Sports').
        self.csubject = csubject
        # A paragraph or so summarizing the news story.
        self.csummary = csummary
        # A link to a web-site with the entire story.
        self.clink = clink

    def getGuid(self):
        return self.cguid

    def getTitle(self):
        return self.ctitle

    def getSubject(self):
        return self.csubject

    def getSummary(self):
        return self.csummary

    def getLink(self):
        return self.clink


#Trigger
class Trigger(object):
    def evaluate(self, story):
        """
        Returns True if an alert should be generated
        for the given news item, or False otherwise.
        """
        raise NotImplementedError


#WordTrigger
class WordTrigger(Trigger):
    def __init__(self, cword):
        # A globally unique identifier for this news story.
        self.cword = cword

    def isWordIn(self, ctext):
        # Checks if word is in text and returns true if present
        # Normalize case for word
        self.cword = self.cword.lower()
        # normalise text case and remove punctuation
        self.ctext = self.ctext.lower()
        exclude = set(str.punctuation)
        self.ctext = ''.join(ch for ch in self.ctext if ch not in exclude)
        # Check if word occurs in text
        if self.cword in self.ctext:
            return True
        else:
            return False


#TitleTrigger
class TitleTrigger(WordTrigger):
    def evaluate(self, story):
        """
        Returns True if an alert should be generated
        for the given news item, or False otherwise.
        """
        if WordTrigger.isWordIn(story.getTitle()): return True
        else: return False
In your class TitleTrigger, use self. You access the method through an instance of the class, not on the class object itself.
def evaluate(self, story):
    """
    Returns True if an alert should be generated
    for the given news item, or False otherwise.
    """
    if self.isWordIn(story.getTitle()): return True
    else: return False
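For completeness, in Python 2 the "unbound method" error can also be avoided by passing the instance explicitly as the first argument, although calling through self is the idiomatic form. A minimal sketch:

def evaluate(self, story):
    # Equivalent to self.isWordIn(...): the unbound method is called
    # with the instance supplied explicitly as the first argument.
    return WordTrigger.isWordIn(self, story.getTitle())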
I'm trying to figure out the best way to create a class that can modify and create new users all in one. This is what I'm thinking:
class User(object):
    def __init__(self, user_id):
        if user_id == -1:
            self.new_user = True
        else:
            self.new_user = False
            # Fetch all records from db about user_id
            self._populateUser()

    def commit(self):
        if self.new_user:
            # Do INSERTs
            pass
        else:
            # Do UPDATEs
            pass

    def delete(self):
        if self.new_user == False:
            return False
        # Delete user code here

    def _populate(self):
        # Query self.user_id from database and
        # set all instance variables, e.g.
        # self.name = row['name']
        pass

    def getFullName(self):
        return self.name
# Create a new user
>>> u = User()
>>> u.name = 'Jason Martinez'
>>> u.password = 'linebreak'
>>> u.commit()
>>> print u.getFullName()
Jason Martinez

# Update existing user
>>> u = User(43)
>>> u.name = 'New Name Here'
>>> u.commit()
>>> print u.getFullName()
New Name Here
Is this a logical and clean way to do this? Is there a better way?
Thanks.
You can do this with metaclasses. Consider this:
class MetaCity(type):
    def __call__(cls, name):
        """
        If it's in the database, retrieve it and return it.
        If it's not there, create it and return it.
        """
        theCity = database.get(name)  # your custom code to get the object from the db goes here
        if not theCity:
            # create a new one
            theCity = type.__call__(cls, name)
        return theCity

class City():
    __metaclass__ = MetaCity
    name = Field(Unicode(64))
Now you can do things like:
paris = City(name=u"Paris") # this will create the Paris City in the database and return it.
paris_again = City(name=u"Paris") # this will retrieve Paris from the database and return it.
From: http://yassinechaouche.thecoderblogs.com/2009/11/21/using-beaker-as-a-second-level-query-cache-for-sqlalchemy-in-pylons/
Off the top of my head, I would suggest the following:
1: Use a default argument None instead of -1 for user_id in the constructor:
def __init__(self, user_id=None):
    if user_id is None:
        ...
2: Skip the getFullName method - that's just your Java talking. Instead use a normal attribute access - you can convert it into a property later if you need to.
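As a sketch of that last point, a plain attribute can later be wrapped in a property without changing any calling code. The attribute names here are hypothetical, not taken from the question:

class User(object):
    def __init__(self, user_id=None):
        self.first_name = ''
        self.last_name = ''

    @property
    def full_name(self):
        # Computed on access; callers read u.full_name just like an attribute
        return '{} {}'.format(self.first_name, self.last_name)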
What you are trying to achieve is called the Active Record pattern. I suggest studying existing systems that provide this sort of thing, such as Elixir.
Small change to your initializer:
def __init__(self, user_id=None):
    if user_id is None: