(PyMongo) Several operations wrapped with commit and rollback, and one of them still proceeds when the whole transaction should stop
I read the manual, and all the commit-and-rollback examples cover only two operations. Is that the limit? A transaction should usually contain 3 or more operations that either all take effect together or, on error, not at all: https://pymongo.readthedocs.io/en/stable/api/pymongo/client_session.html
I tried to put 3 operations inside the commit-and-rollback, but
mycol_two.insert_one() didn't stop proceeding like the other operations when the error occurred.
Brief description:
I have three collections in the same DB:
collection "10_20_cash_all"
collection "10_20_cash_log"
collection "10_20_cash_info"
The commit and rollback are on lines 39 to 44 of my script.
Line 42 is print( 3/0 ), an intentional error; I expected the whole function to stop proceeding.
import pymongo
import datetime
import json
from bson.objectid import ObjectId
from bson import json_util
import re
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["(practice_10_14)-0004444"]
mycol_one = mydb["10_20_cash_all"]
mycol_two = mydb["10_20_cash_log"]
mycol_3rd = mydb["10_20_cash_info"]
# already store 100$ in bank
# doc_two = {"ID" : 100998 , "Cash_log$" : 5 } # withdraw 70$ from bank
doc_two = input("Enter ID and log amount$: ")
doc_3rd = input("Enter extra info: ")
doc_two_dic = json.loads(doc_two)
doc_3rd_dic = json.loads(doc_3rd)
# doc_3rd = {"note" : "today is good" }
ID_input = doc_two_dic['ID']
print("ur id is :" + str(ID_input))
doc_one = {"ID" : ID_input}
with myclient.start_session() as s:
    cash_all_result = mycol_one.find_one(doc_one, session=s)

    def cb(s):
        try:
            while True:
                cash_all_result = mycol_one.find_one(doc_one, session=s)
                mycol_two.insert_one(doc_two_dic, session=s)
                print( 3/0 )  # intentional error
                mycol_3rd.insert_one(doc_3rd_dic, session=s)
                print( "now total is :" + str(cash_all_result['Cash_$']) )
                Cash_total_int = int(cash_all_result['Cash_$'])
                log_int = int(doc_two_dic['Cash_log$'])
                if Cash_total_int < log_int:
                    print("error: withdraw is over ur balance")
                    break
                new_Cash_total = Cash_total_int - log_int
                print("now total is :" + str(new_Cash_total))
                newvalues_json = { "$set" : {"Cash_$" : new_Cash_total } }
                mycol_one.update_one(doc_one, newvalues_json, session=s)
                fail_condition_json = {"ok" : 1 , "fail reason" : "no error "}
                print(fail_condition_json)
                return fail_condition_json
        except Exception as e:
            fail_condition_json = {"ok" : 0 , "fail reason" : "error raise on start_session()"}
            print(fail_condition_json)
            return fail_condition_json

    s.with_transaction(cb)
command prompt:
Enter ID and log amount$: {"ID" : 100998 , "Cash_log$" : 5 }
Enter extra info: {"note" : "today is good" }
ur id is :100998
{'ok': 0, 'fail reason': 'error raise on start_session()'}
the "10_20_cash_log" still store new value which shoud empty/not run like '"10_20_cash_info"' is empty
{
    "_id" : ObjectId("635262e502725626c39cbe9e"),
    "ID" : 100998,
    "Cash_log$" : 5
}
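For what it's worth, with_transaction() only aborts when the callback raises: the try/except inside cb() catches the ZeroDivisionError and returns a dict normally, so the transaction commits whatever ran before the error, which matches the behavior above. A minimal sketch of the intended all-or-nothing behavior, assuming a replica-set deployment (which transactions require); the collection names here are made up:

import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017/?replicaSet=rs0")  # assumed replica set name
db = client["test_db"]

def cb(session):
    db.col_a.insert_one({"step": 1}, session=session)
    db.col_b.insert_one({"step": 2}, session=session)
    print(3 / 0)  # intentional error: let the exception propagate out of the callback
    db.col_c.insert_one({"step": 3}, session=session)

with client.start_session() as s:
    try:
        s.with_transaction(cb)  # aborts on the exception
    except ZeroDivisionError:
        print("transaction aborted, none of the three inserts persisted")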
Related
I am really new to the Twitter API, and I've been trying to get a list of IDs of everyone who retweeted a specific tweet.
After several attempts I can't get api.get_retweeter_ids to return every ID; it always returns only a few. I know there is a limit of 100 per request, but the function just stops after getting around 50-90 IDs on a tweet with 30k retweets or so.
Here is my code:
def get_user_ids_by_retweets(tweetid):
    retweeters_ids = []
    for i, _id in enumerate(tweepy.Cursor(api.get_retweeter_ids, id=tweetid).items()):
        retweeters_ids.append(_id)
        print(i, _id)
    df = pd.DataFrame(retweeters_ids)
    # print(df)
    return retweeters_ids
Demo to get the full list of retweeters (name, id and username) for
https://twitter.com/Nike/status/1582388225049780230/retweets
code
import tweepy
import json

def get_user_ids_by_retweets(tweet_id):
    # get a client with the token
    bearer_token = "*************************"
    client = tweepy.Client(bearer_token=bearer_token)
    listUser = []
    # get the first page of retweeters
    retweeters = client.get_retweeters(id=tweet_id)
    for retweeter in retweeters.data:
        listUser.append({
            "name": retweeter.name,
            "id": retweeter.id,
            "username": retweeter.username
        })
    next_token = retweeters.meta.get('next_token')  # absent on the last page
    # keep paging until the end of the retweeters
    while next_token is not None:
        retweeters = client.get_retweeters(id=tweet_id, pagination_token=next_token)
        if retweeters.data is not None:
            for retweeter in retweeters.data:
                listUser.append({
                    "name": retweeter.name,
                    "id": retweeter.id,
                    "username": retweeter.username
                })
            next_token = retweeters.meta.get('next_token')
        else:
            next_token = None
    return listUser
def obj_dict(obj):
    return obj.__dict__
tweet_id="1582388225049780230"
listUser = get_user_ids_by_retweets(tweet_id)
print(json.dumps(listUser, indent=4, default=obj_dict))
Result
[
    {
        "name": "valmig",
        "id": 1594136795905593344,
        "username": "AngelVa00615402"
    },
    {
        "name": "Wyatt Jones",
        "id": 764734669434871808,
        "username": "TheGhostZeus"
    },
    {
        "name": "Prime Projects",
        "id": 1603887705242435584,
        "username": "PrimeProjects4"
    },
    ... removed
    {
        "name": "Ryan Maldonado",
        "id": 1419009007688224768,
        "username": "RyanMal87509518"
    },
    {
        "name": "Jimmy Daugherty",
        "id": 20888017,
        "username": "JimmyDaugherty"
    },
    {
        "name": "Nike Basketball",
        "id": 5885732,
        "username": "nikebasketball"
    }
]
Main Idea
The Twitter API returns a limited number of retweeters per request, together with a next_token.
Assigning that token to pagination_token fetches the next page of retweeters.
You have all of the retweeters once next_token is null.
So in the demo below, the first fetch (#1) and the next-page fetch (#4) each get two retweeters using next_token, and together they are the same as the four retweeters from the single fetch in #6.
import tweepy

bearer_token ="*************************"
client = tweepy.Client(bearer_token=bearer_token)
tweet_id="1582388225049780230"

print("#1 -------- Get first two tweeter -------------------------")
retweeters = client.get_retweeters(id=tweet_id, max_results=2)

print("#2 -------- Show Meta --------------------")
print(retweeters.meta)
print(" ")

print("#3 -------- print two -------------------------")
for retweeter in retweeters.data:
    print(retweeter.name, " -> ", retweeter.id, ",", retweeter.username)
print(" ")

print("#4 ---------Get Next two tweeter ---------------------------")
retweeters = client.get_retweeters(id=tweet_id, pagination_token=retweeters.meta['next_token'], max_results=2)
print(retweeters.meta)
print(" ")

print("#5 -------- print two -------------------------")
for retweeter in retweeters.data:
    print(retweeter.name, " -> ", retweeter.id, ",", retweeter.username)
print(" ")

print("#6 --- Get First four tweeter == are same #1 + #2 ---------")
retweeters = client.get_retweeters(id=tweet_id, max_results=4)
print(" ")

print("#7 -------- print four -------------------------")
for retweeter in retweeters.data:
    print(retweeter.name, " -> ", retweeter.id, ",", retweeter.username)
$ python retweet.py
#1 -------- Get first two tweeter -------------------------
#2 -------- Show Meta --------------------
{'result_count': 2, 'next_token': '7140dibdnow9c7btw4827c3yb0pfg7mg4qq12dn59ot9s'}
#3 -------- print two -------------------------
valmig -> 1594136795905593344 , AngelVa00615402
Wyatt Jones -> 764734669434871808 , TheGhostZeus
#4 ---------Get Next two tweeter ---------------------------
{'result_count': 2, 'next_token': '7140dibdnow9c7btw4827c3nilr9nqckqkuxdzj3u7pkn', 'previous_token': '77qpymm88g5h9vqkluxdnrmaxhecakrtbzn80cd5hizht'}
#5 -------- print two -------------------------
Prime Projects -> 1603887705242435584 , PrimeProjects4
Joshua Paul Hudson -> 847275330 , JoshswiftJoshua
#6 --- Get First four tweeter == are same #1 + #2 ---------
#7 -------- print four -------------------------
valmig -> 1594136795905593344 , AngelVa00615402
Wyatt Jones -> 764734669434871808 , TheGhostZeus
Prime Projects -> 1603887705242435584 , PrimeProjects4
Joshua Paul Hudson -> 847275330 , JoshswiftJoshua
I would avoid managing the tokens manually if it's not needed. The Paginator is the tool for this (it's the API v2 counterpart of the API v1.1 Cursor that you've tried to use). If you are sure that the number of retweets is covered by the currently available number of requests (default is 100 retweeters per request), then you could try the following (it's equivalent to the other answer):
def get_user_ids_by_retweets(tweet_id):
    client = tweepy.Client(BEARER_TOKEN, return_type=dict)
    return list(tweepy.Paginator(client.get_retweeters, tweet_id).flatten())
If you're not sure about that but just want to give it a try without losing any retrieved retweeters, you could use this variation, which catches the corresponding tweepy.errors.TooManyRequests exception:
def get_user_ids_by_retweets(tweet_id):
    client = tweepy.Client(BEARER_TOKEN, return_type=dict)
    users = []
    try:
        for page in tweepy.Paginator(client.get_retweeters, tweet_id):
            users.extend(page.get("data", []))
    except tweepy.errors.TooManyRequests:
        print("Too many requests, couldn't retrieve all retweeters.")
    return users
If you want to make sure that you get all retweeters, then you could add a waiting period tailored to your access level (if you're using the free version, you should have 75 requests per 15 minutes, i.e. after reaching the limit you need to wait 60 * 15 seconds). Here you need to use the token to re-enter at the point where you left off in case the rate limit was reached:
from time import sleep

DURATION = 60 * 15 + 5

def get_user_ids_by_retweets(tweet_id):
    client = tweepy.Client(BEARER_TOKEN, return_type=dict)
    users, token = [], None
    while True:
        pages = tweepy.Paginator(
            client.get_retweeters, tweet_id, pagination_token=token
        )
        try:
            for page in pages:
                users.extend(page.get("data", []))
                token = page["meta"].get("next_token", None)
            if token is None:
                break
        except tweepy.errors.TooManyRequests:
            print("Request rate limit reached, taking a nap.")
            sleep(DURATION)
    return users
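As a side note, tweepy can also do the waiting for you: passing wait_on_rate_limit=True to the Client makes it sleep through 429 responses automatically, so the manual token bookkeeping above becomes unnecessary. A sketch under that assumption (same placeholder BEARER_TOKEN):

import tweepy

def get_user_ids_by_retweets(tweet_id):
    # the client sleeps whenever the API answers "429 Too Many Requests",
    # and Paginator follows next_token transparently
    client = tweepy.Client(BEARER_TOKEN, return_type=dict, wait_on_rate_limit=True)
    return list(tweepy.Paginator(client.get_retweeters, tweet_id).flatten())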
I am consuming Amazon Connect CTRs through Amazon Kinesis and inserting the data into Postgres. I am facing very unexpected behavior from Kinesis and the Lambda function: whenever a CTR record comes through Kinesis, my Lambda gets invoked and, after inserting that record into Postgres, it gets invoked again, even though I have received only one record. Here is my code; if anything is wrong with it, please correct me:
def lambda_handler(event, context):
    print(event['Records'])
    print(event)
    for record in event['Records']:
        conn = psycopg2.connect(
            host = hostt,
            user = username,
            password = passwordd,
            database = databasee
        )
        cur = conn.cursor(cursor_factory = RealDictCursor)
        payload = base64.b64decode(record['kinesis']['data'])
        de_serialize_payload = json.loads(payload)
        print(len(de_serialize_payload))
        print(de_serialize_payload)
        try:
            for dsp in de_serialize_payload:
                if de_serialize_payload['Agent'] != None and de_serialize_payload['CustomerEndpoint'] != None and de_serialize_payload['Recording'] != None and de_serialize_payload['TransferredToEndpoint'] != None:
                    required_data = {
                        'arn' : de_serialize_payload['Agent']['ARN'],
                        'aftercontactworkduration' : de_serialize_payload['Agent']['AfterContactWorkDuration'],
                        'aftercontactworkendtimestamp' : de_serialize_payload['Agent']['AfterContactWorkEndTimestamp'],
                        'aftercontactworkstarttimestamp' : de_serialize_payload['Agent']['AfterContactWorkStartTimestamp'],
                        'agentconnectionattempts' : de_serialize_payload['AgentConnectionAttempts'],
                        'agentinteractionduration' : de_serialize_payload['Agent']['AgentInteractionDuration'],
                        'answeringmachinedetectionstatus' : de_serialize_payload['AnsweringMachineDetectionStatus'],
                        'channel' : de_serialize_payload['Channel'],
                        'connectedtoagenttimestamp' : de_serialize_payload['Agent']['ConnectedToAgentTimestamp'],
                        'connectedtosystemtimestamp' : de_serialize_payload['ConnectedToSystemTimestamp'],
                        'customerendpointaddress' : de_serialize_payload['CustomerEndpoint']['Address'],
                        'customerendpointtype' : de_serialize_payload['CustomerEndpoint']['Type'],
                        'customerholdduration' : de_serialize_payload['Agent']['CustomerHoldDuration'],
                        'dequeuetimestamp' : de_serialize_payload['Queue']['DequeueTimestamp'],
                        'disconnectreason' : de_serialize_payload['DisconnectReason'],
                        'disconnecttimestamp' : de_serialize_payload['DisconnectTimestamp'],
                        'queueduration' : de_serialize_payload['Queue']['Duration'],
                        'enqueuetimestamp' : de_serialize_payload['Queue']['EnqueueTimestamp'],
                        'hierarchygroups' : de_serialize_payload['Agent']['HierarchyGroups'],
                        'initialcontactid' : de_serialize_payload['InitialContactId'],
                        'initiationmethod' : de_serialize_payload['InitiationMethod'],
                        'initiationtimestamp' : de_serialize_payload['InitiationTimestamp'],
                        'instancearn' : de_serialize_payload['InstanceARN'],
                        'lastupdatetimestamp' : de_serialize_payload['LastUpdateTimestamp'],
                        'longestholdduration' : de_serialize_payload['Agent']['LongestHoldDuration'],
                        'nextcontactid' : de_serialize_payload['NextContactId'],
                        'numberofholds' : de_serialize_payload['Agent']['NumberOfHolds'],
                        'previouscontactid': de_serialize_payload['PreviousContactId'],
                        'queuearn' : de_serialize_payload['Queue']['ARN'],
                        'queuename' : de_serialize_payload['Queue']['Name'],
                        'recordingdeletionreason' : de_serialize_payload['Recording']['DeletionReason'],
                        'recordinglocation' : de_serialize_payload['Recording']['Location'],
                        'recordingstatus' : de_serialize_payload['Recording']['Status'],
                        'recordingtype' : de_serialize_payload['Recording']['Type'],
                        'routingprofilearn' : de_serialize_payload['Agent']['RoutingProfile']['ARN'],
                        'routingprofilename' : de_serialize_payload['Agent']['RoutingProfile']['Name'],
                        'scheduledtimestamp' : de_serialize_payload['ScheduledTimestamp'],
                        'systemendpointaddress' : de_serialize_payload['SystemEndpoint']['Address'],
                        'systemendpointtype' : de_serialize_payload['SystemEndpoint']['Type'],
                        'transfercompletedtimestamp' : de_serialize_payload['TransferCompletedTimestamp'],
                        'transferredtoendpoint' : de_serialize_payload['TransferredToEndpoint']['Address'],
                        'username' : de_serialize_payload['Agent']['Username'],
                        'voiceidresult' : de_serialize_payload['VoiceIdResult'],
                        'id' : de_serialize_payload['ContactId']
                    }
                    columns = required_data.keys()
                    print(columns)
                    values = [required_data[column] for column in columns]
                    print(values)
                    insert_statement = "insert into public.ctr (%s) values %s;"
                    cur.execute(insert_statement, (AsIs(','.join(columns)), tuple(values)))
                    print(cur.mogrify(insert_statement, (AsIs(','.join(columns)), tuple(values))))
                    conn.commit()
                    count = cur.rowcount
                    print(count, "Record inserted successfully into mobile table")
                    print("Agent, customer endpoint, transfer endpoint and recording data is available")
After one successful iteration it starts iterating again. I have spent more than two days on this and couldn't figure out the problem.
I would really appreciate it if someone could guide me and help sort this out.
The issue was in my code: I was not ending my function successfully. It is Kinesis behavior that if your function does not end successfully (200 OK), Kinesis re-invokes it several times. So it is necessary to end your function properly.
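A minimal sketch of what "ending properly" can look like; insert_record() is a hypothetical helper standing in for the Postgres logic above. Per-record errors are caught so the handler returns normally, because an uncaught exception fails the invocation and makes Kinesis re-deliver the same batch:

import base64
import json

def lambda_handler(event, context):
    for record in event["Records"]:
        try:
            payload = json.loads(base64.b64decode(record["kinesis"]["data"]))
            insert_record(payload)  # hypothetical helper wrapping the Postgres insert
        except Exception as e:
            # log and continue: raising here would fail the whole invocation
            # and trigger Kinesis retries of the entire batch
            print("skipping bad record:", e)
    # returning normally marks the batch as successfully processed
    return {"statusCode": 200}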
Here is the script which I have working inside MongoDB Compass and inside Studio 3T:
db.getCollection("collection").find({
geometry: {
$geoIntersects: {
$geometry: {
type: "Polygon" ,
coordinates: [ [
[-2.4478329206542915, 52.679303638992494], [-2.4423397565917915, 52.677534343091544], [-2.445601322753901, 52.67430779548455], [-2.4509228254394477, 52.676129262942176], [-2.4478329206542915, 52.679303638992494]
] ]
}
}
}
})
and it returns the expected results.
The query works fine there, but when I try to run it inside a Python script it keeps failing, and I need a little help pointing out the seemingly not-so-obvious error. I have changed the find statement to a simple field find and it works, just not when it is a $geoIntersects find.
Please see below for a copy of my Python script; sorry for the redacted values, but I need to keep the data secure.
import pymongo

#mongodb service vars
dbStartc = 'mongodb+srv://'
dbDomain = '@cluster0.o7cud.azure.mongodb.net'
dbUser = 'redacted'
dbPass = 'redacted'
dbName = 'redacted'
dbColl = 'redacted'
dbSettings = '/?retryWrites=true&w=majority'

#test vars
dbName_connected = False
dbColl_connected = False

try:
    #connect to the mongodb instance
    mongoURL = dbStartc + dbUser + ':' + dbPass + dbDomain + dbSettings
    mongocon = pymongo.MongoClient(mongoURL)

    #connect to the database
    dblist = mongocon.list_database_names()
    if dbName in dblist:
        mongocdb = mongocon[dbName]
        #print("MongoDB Service - Database connected")
        dbName_connected = True

        #connect to the collection we need
        collist = mongocdb.list_collection_names()
        if dbColl in collist:
            mongocol = mongocdb[dbColl]
            #print("MongoDB Service - Collection connected")
            dbColl_connected = True

    #pre checks test
    if dbName_connected and dbColl_connected:
        #print("MongoDB Service - Connection Checks Complete")
        find_result = []
        found_count = 0
        found_count = mongocol.count_documents( )
        if found_count > 0:
            print("Collection Document Count: " + found_count)
            mydoc = mongocol.find({ geometry: { $geoIntersects: { $geometry: { type: "Polygon" , coordinates: [ [ [-2.44783, 52.67930], [-2.44233, 52.67753], [-2.44560, 52.67430], [-2.45092, 52.67612], [-2.44783, 52.67930] ] ] } } } })
            #for x in
            for x in mydoc:
                find_result += [x]
            print(find_result)
    else:
        print("MongoDB Service - Connection Checks Failed")
except Exception as ex:
    print("Something you were not expecting went wrong! (" + ex + ")")
The script fails with an error on the $geoIntersects find. Any help in getting the Python version to work would be greatly appreciated.
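For what it's worth, a few things in the script would each raise an error in Python, independent of the geo query itself: dict keys like geometry and $geoIntersects must be quoted strings in Python, count_documents() requires a filter argument, and concatenating non-strings (found_count, ex) to strings raises TypeError. A sketch of the query section with those points applied (connection setup as in the question):

# the geo query as a PyMongo filter: keys are plain strings in Python
polygon = {
    "type": "Polygon",
    "coordinates": [[
        [-2.44783, 52.67930], [-2.44233, 52.67753],
        [-2.44560, 52.67430], [-2.45092, 52.67612],
        [-2.44783, 52.67930],
    ]],
}
query = {"geometry": {"$geoIntersects": {"$geometry": polygon}}}

found_count = mongocol.count_documents({})  # count_documents needs a filter dict
if found_count > 0:
    print("Collection Document Count: " + str(found_count))  # cast before concatenating
    find_result = list(mongocol.find(query))
    print(find_result)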
Following is my code to add a new defect to Rally using Python:
import sys
import time
from pyral import Rally, rallyWorkset
server = "rally1.rallydev.com"
user = "*****"
password = "****"
apikey = "****"
workspace = "****"
project = "****"
rally = Rally(server, user=user, password=password,apikey=apikey, workspace=workspace, project=project)
project_req = rally.get('Project', fetch=True, query='Name = "%s"' % (project))
project = project_req.next()
priority = "3.Normal"
severity = "Major Problem"
name = "prabhakar.sharma#***.com"
#defectID = 'INC0547865'
description = "A Test Rally User Story created using python API now, start working on it as fast as you all could !!"
user = rally.getUserInfo(username=name).pop(0)
#rally.setProject(proj)
print("%s %s "% (project.oid , user.ref))
defect_data = { "Project" : project.ref,
"SubmittedBy" : user.ref,
"Name" : name,
"Severity" : severity,
"Priority" : priority,
"State" : "Open",
"ScheduleState" : "Defined",
"Owner": user.ref,
"Description" : description
}
defect = rally.create('Defect', defect_data) print("Defect created, ObjectID: %s FormattedID: %s" % (defect.oid, defect.FormattedID))
Traceback:
Traceback (most recent call last):
  File "pythonrally.py", line 186, in <module>
    defect = rally.create('Defect', defect_data)
  File "C:\Users\PRABHAKAR.SHARMA\AppData\Local\Programs\Python\Python37\pyral\restapi.py", line 1024, in put
    raise RallyRESTAPIError(problem)
pyral.restapi.RallyRESTAPIError: 422 Validation error: Defect.PRJ# should not be null
Changes:
Must use "username=name" (where 'name' is a login ID) instead of "username=user".
The field "When" is not part of a Defect (commented out below).
The field "PRJ#" is not part of a Defect (commented out below).
The project ref must be specified as "proj.ref" instead of "project.ref".
$ diff code.orig code.new
10c10
< user = rally.getUserInfo(username=user).pop(0)
---
> user = rally.getUserInfo(username=name).pop(0)
12,14c12,14
< defect_data = { "When" : when,
< "PRJ#" : project.oid,
< "Project" : project.ref,
---
> defect_data = { #"When" : when,
> #"PRJ#" : proj.oid,
> "Project" : proj.ref,
The above changes worked for me.
So, I changed the line below:
defect = rally.create('Defect', defect_data)
print("Defect created, ObjectID: %s FormattedID: %s" % (defect.oid, defect.FormattedID))
to
defect = rally.create('UserStory', defect_data)
print("Defect created, ObjectID: %s FormattedID: %s" % (defect.oid, defect.FormattedID))
and, following what the errors were telling me, I got it working.
Also, I removed a few fields from 'defect_data', as follows:
defect_data = { "Project" : project.ref,
#"SubmittedBy" : user.ref,
"Name" : name,
#"Severity" : severity,
"Priority" : priority,
#"State" : "Open",
"ScheduleState" : "Defined",
"Owner": user.ref,
"Description" : description
}
Final Code:
project_req = rally.get('Project', fetch=True, query='Name = "%s"' % (project))
project = project_req.next()
priority = "Normal"
severity = "Major Problem"
name = "prabhakar.sharma@dish.com"
#when = "2018.12.26 - 2019.01.01"
#defectNum = 'INC0547865'
description = "Just one more test Rally User Story created using python API now, start working on it as fast as you all could !!"
user = rally.getUserInfo(username=user).pop(0)
#rally.setProject(proj)
print("%s %s " % (project.ref, user.ref))
defect_data = {
    "Project" : project.ref,
    #"SubmittedBy" : user.ref,
    "Name" : name,
    #"Severity" : severity,
    "Priority" : priority,
    #"State" : "Open",
    "ScheduleState" : "Defined",
    "Owner" : user.ref,
    "Description" : description
}
try:
    print("am here %s " % defect_data)
    defect = rally.create('UserStory', defect_data)
except Exception as e:
    sys.stderr.write('ERROR: %s \n' % Exception)
    print(e)
    sys.exit(1)
Hope this helps someone facing a problem like this one! Cheers.
I need to test the accuracy of a MongoDB server.
I am trying to insert a sequence of data, recording the moment each item was sent to the database, so as to know when it was actually inserted. I'm trying this:
#!/usr/bin/python
from pymongo import Connection
from datetime import date, timedelta, datetime

class FilterData:
    @classmethod
    def setData(self, serialData):
        try:
            con = Connection('IP_REMOTE', 27017, safe=True)
            db = con['resposta']
            inoshare = db.resposta
            inoshare.insert(serialData)
            con.close()
        except Exception as e:
            print "Erro no filter data: ", e.message, e.args

obj = FilterData()
inicio = datetime.now()
termino = inicio + timedelta(seconds=10)
contador = 1
while inicio <= termino:
    print contador, inicio.strftime('%d-%m-%Y %H:%M:%S')
    pacote = {'contador': contador, 'datahora': $currentDate()}
    obj.setData(pacote)
    contador += 1
But MongoDB's operators (the ones using $) are not recognized in Python. How do I accomplish this integration?
Obs: IP_REMOTE = my valid IP on the remote server.
I then tried the following, but it only inserts a single record:
#!/usr/bin/python
from pymongo import Connection
from datetime import date, timedelta, datetime
import time

class FilterData:
    def __init__(self):
        self.con = Connection('54.68.148.224', 27017, safe=True)
        self.db = self.con['resposta']
        self.inoshare = self.db.resposta

    def setData(self, serialData):
        try:
            self.inoshare.update({}, serialData, upsert=True)
        except Exception as e:
            print "Erro no filter data: ", e.message, e.args

    def desconect(self):
        self.con.close()

obj = FilterData()
inicio = datetime.now()
termino = inicio + timedelta(seconds=30)
while inicio <= termino:
    print inicio.strftime('%d-%m-%Y %H:%M:%S')
    pacote = {'$currentDate': {'datahora': { '$type': 'date' }}}
    obj.setData(pacote)
    inicio = datetime.now()
    time.sleep(1)
obj.desconect()
Operator expressions in MongoDB are represented in the data structure as strings. These are also "update operators", so $currentDate is meant to be used in the "update object" portion of an .update() method.
So do something like this to insert a new record with the "$currentDate" from the server:
db = con['resposta']
inoshare = db.resposta

inoshare.update({}, {
    '$currentDate': {
        'datahora': { '$type': 'date' }
    }
}, upsert=True)
That presumes, of course, that there is nothing in your collection. Otherwise, make sure the "query" portion of the .update() statement does not match a document when you want to "insert"/"upsert", as it were.
All the documented options in the MongoDB manual pages are in JSON notation relevant to the MongoDB shell, but this is not that different from the notation of many dynamically typed languages such as Python, Ruby and Perl.
BTW, unless you are really testing with distinct scripts, do not make a connection and disconnect before and after every operation. Database connections should stay open for the life-cycle of your application.
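To illustrate the earlier upsert point with the question's own counter field (a sketch; contador is whatever incrementing value you track): a query that matches no existing document makes each call insert a fresh record that the server itself timestamps:

# each new 'contador' value matches nothing yet, so the upsert inserts a
# new document and the server fills in 'datahora' via $currentDate
inoshare.update(
    {'contador': contador},
    {'$currentDate': {'datahora': {'$type': 'date'}}},
    upsert=True
)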
You should pass the Python datetime to mongo like this:
>>> from datetime import datetime
>>> datetime.now()
Your code:
pacote = {'contador':contador, 'datahora':datetime.now()}
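One caveat worth noting: MongoDB stores dates in UTC, while datetime.now() is local time, which would explain offsets like the 15:38 local output vs. 18:38 ISODate shown further down. Sending a UTC timestamp keeps the two sides comparable:

from datetime import datetime

# naive UTC timestamp, directly comparable to the ISODate values Mongo stores
pacote = {'contador': contador, 'datahora': datetime.utcnow()}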
Thanks to everyone who helped me. I understand now: first do an insert and then an update, like this:
class FilterData:
    def __init__(self):
        self.con = Connection('IP_REMOTE', 27017, safe=True)
        self.db = self.con['resposta']
        self.inoshare = self.db.resposta
        self.contador = 1

    def setData(self, serialData):
        try:
            self.inoshare.insert({'contador': self.contador}, serialData, upsert=True)
            print self.contador, datetime.now().strftime('%d-%m-%Y %H:%M:%S.%f')
            self.inoshare.update({'contador': self.contador}, serialData, upsert=True)
            self.contador += 1
        except Exception as e:
            print "Erro no filter data: ", e.message, e.args

    def desconect(self):
        self.con.close()
That way I can check the time the query was sent and the moment it was executed on the remote server.
On the on-site host I have the following output, for example:
1 08-11-2014 15:37:45.079000
1 08-11-2014 15:38:04.039000
2 08-11-2014 15:38:05.410000
3 08-11-2014 15:38:06.785000
4 08-11-2014 15:38:08.153000
5 08-11-2014 15:38:09.522000
6 08-11-2014 15:38:10.886000
7 08-11-2014 15:38:12.243000
8 08-11-2014 15:38:13.609000
And on the remote server I get the following output:
{"contador" : 1, "datahora" : ISODate("2014-11-08T18:38:05.323Z") }
{"contador" : 2, "datahora" : ISODate("2014-11-08T18:38:06.687Z") }
{"contador" : 3, "datahora" : ISODate("2014-11-08T18:38:08.060Z") }
{"contador" : 4, "datahora" : ISODate("2014-11-08T18:38:09.429Z") }
{"contador" : 5, "datahora" : ISODate("2014-11-08T18:38:10.796Z") }
{"contador" : 6, "datahora" : ISODate("2014-11-08T18:38:12.162Z") }
{"contador" : 7, "datahora" : ISODate("2014-11-08T18:38:13.527Z") }
{"contador" : 8, "datahora" : ISODate("2014-11-08T18:38:14.893Z") }
That way I can identify the time difference between the moment of the update and the moment it was really inserted in the database. Note: the clocks are synchronized.