Random Time Generation - python

I am new to Python. I am trying to write a little random time generator that produces random times between a given start variable and a given end variable, for 1000 records, and then saves those 1000 records into a database.
This is the code I have so far.
SQL.py
from sqlalchemy import create_engine, Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///sql.sqlite')
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    time = Column(Integer, default=None, index=True)

Base.metadata.create_all(engine)
Random.py
import datetime
import time
import random

MINTIME = datetime.datetime(2010, 8, 6, 8, 14, 59)
MAXTIME = datetime.datetime(2013, 8, 6, 8, 14, 59)
RECORDS = 1000

for RECORD in range(RECORDS):
    RANDOMTIME = random.randint(MINTIME, MAXTIME)
    print RANDOMTIME
It produces this traceback:
TypeError: unsupported operand type(s) for +: 'datetime.datetime' and 'int'
What am I doing wrong? If possible, please suggest a refactored approach.

Basically, the issue is that random.randint expects integers, so you should give it integers and convert the result back to a datetime as you require.
There may be a more efficient way, but here is one approach:
import datetime
import time
import random

MINTIME = datetime.datetime(2010, 8, 6, 8, 14, 59)
MAXTIME = datetime.datetime(2013, 8, 6, 8, 14, 59)
RECORDS = 1000

mintime_ts = int(time.mktime(MINTIME.timetuple()))
maxtime_ts = int(time.mktime(MAXTIME.timetuple()))

for RECORD in range(RECORDS):
    random_ts = random.randint(mintime_ts, maxtime_ts)
    RANDOMTIME = datetime.datetime.fromtimestamp(random_ts)
    print RANDOMTIME
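Since you also want to save the 1000 records into the database, here is a minimal sketch of how that could look, reusing the User model and session from your SQL.py and assuming you store the Unix timestamp in the integer time column:

import datetime
import random
import time

from SQL import session, User  # assumes SQL.py is importable from the same directory

MINTIME = datetime.datetime(2010, 8, 6, 8, 14, 59)
MAXTIME = datetime.datetime(2013, 8, 6, 8, 14, 59)
RECORDS = 1000

mintime_ts = int(time.mktime(MINTIME.timetuple()))
maxtime_ts = int(time.mktime(MAXTIME.timetuple()))

for _ in range(RECORDS):
    # Store each random moment as a Unix timestamp, matching the Integer column
    session.add(User(time=random.randint(mintime_ts, maxtime_ts)))

session.commit()  # one commit for all 1000 rows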

The problem is:
RANDOMTIME = random.randint(MINTIME, MAXTIME)
randint expects two integers; you are providing two dates.
You can do the following, given that the time of day is the same for MINTIME and MAXTIME:
for RECORD in range(RECORDS):
    n = random.randint(0, (MAXTIME - MINTIME).days)
    RANDOMTIME = MINTIME + datetime.timedelta(days=n)

Use the start and end dates to determine the number of seconds between them, then generate a random number between 0 and that, and add it to the start date, e.g.:
from datetime import datetime, timedelta
from random import randint

MINTIME = datetime(2010, 8, 6, 8, 14, 59)
MAXTIME = datetime(2013, 8, 6, 8, 14, 59)
PERIOD = int((MAXTIME - MINTIME).total_seconds())

for n in range(1000):
    dt = MINTIME + timedelta(seconds=randint(0, PERIOD))
    print dt
    # 2012-12-10 18:34:23
    # etc...

Related

Python: determine current time is not older than 5 minutes

From an API I get my latest event time. I want to check whether the event time coming from the API is older than 5 minutes. Here is my code:
import json
from typing import Optional
import datetime
import time
import requests

def check_event():
    now = datetime.datetime.now()
    old_time = datetime.datetime.now() - datetime.timedelta(minutes=5)
    res = requests.post(URL, data=json.dumps(PAYLOAD), headers=headers)
    available_event = res.json()
    print(available_event[0]['result']['time'])
    event_time = available_event[0]['result']['time']
    ev2 = datetime.datetime.strptime(event_time, '%Y-%m-%dT%H:%M:%S.%f%z')
    print(ev2)
    if event_time < old_time:
        print("old")
    else:
        print("fresh")
The time from my API is returned in this format:
2022-04-14T07:28:08.000Z
When I parse event_time with strptime to convert the string to a datetime, I get the following output:
2022-04-14 07:49:27+00:00
The old_time variable prints in the following format:
2022-04-14 10:23:08.169712
When I compare both times, I get the following error:
TypeError: '<' not supported between instances of 'str' and 'datetime.datetime'
How can I fix this?
[Edited]. Yes, as stated below, you can use timezone from the datetime module:
from datetime import datetime, timedelta, timezone

def check_event(event_time):
    event_time = datetime.strptime(event_time, '%Y-%m-%dT%H:%M:%S.%f%z')
    return event_time > datetime.now(timezone.utc) - timedelta(minutes=5)

time_from_API = '2022-04-14T07:28:08.000Z'
print(check_event(time_from_API))
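As a side note, on Python 3.11+ the same string can be parsed with datetime.fromisoformat, which accepts the trailing 'Z' directly (on 3.7–3.10 it does not, so strptime with %z, as above, is the safer choice). A sketch of that variant:

from datetime import datetime, timedelta, timezone

def check_event(event_time):
    # fromisoformat understands the trailing 'Z' on Python 3.11+
    event_time = datetime.fromisoformat(event_time)
    return event_time > datetime.now(timezone.utc) - timedelta(minutes=5)

print(check_event('2022-04-14T07:28:08.000Z'))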

Reset index name in elasticsearch dsl

I'm trying to create an ETL that extracts from Mongo, processes the data and loads it into Elastic. I will do a daily load, so I thought of naming my index with the current date. This will help me with some later processing I need to do on this first index.
I used the elasticsearch-dsl guide: https://elasticsearch-dsl.readthedocs.io/en/latest/persistence.html
The problem I have comes from my limited experience working with classes: I don't know how to reset the index name from the class.
Here is my code for the class (custom_indices.py):
from elasticsearch_dsl import Document, Date, Integer, Keyword, Text
from elasticsearch_dsl.connections import connections
from elasticsearch_dsl import Search
import datetime

class News(Document):
    title = Text(analyzer='standard', fields={'raw': Keyword()})
    manual_tagging = Keyword()

    class Index:
        name = 'processed_news_' + datetime.datetime.now().strftime("%Y%m%d")

    def save(self, **kwargs):
        return super(News, self).save(**kwargs)

    def is_published(self):
        return datetime.datetime.now() >= self.processed
And this is the part of the code where I create the instance to that class:
from custom_indices import News
import elasticsearch
import elasticsearch_dsl
from elasticsearch_dsl.connections import connections
import pandas as pd
import datetime

connections.create_connection(hosts=['localhost'])
News.init()
for index, doc in df.iterrows():
    new_insert = News(meta={'id': doc.url_hashed},
                      title=doc.title,
                      manual_tagging=doc.customTags,
                      )
    new_insert.save()
Every time I call the "News" class I would expect to get a new name. However, the name doesn't change even if I reload the class (from custom_indices import News). I know this is only a problem I have when testing, but I'd like to know how to force that "reset". Actually, I originally wanted to change the name outside the class with this line right before the loop:
News.Index.name = "NEW_NAME"
However, that didn't work; I was still seeing the name defined on the class.
Could anyone please assist?
Many thanks!
PS: this must just be an object-oriented programming issue. Apologies for my ignorance on the subject.
Maybe you could take advantage of the fact that Document.init() accepts an index keyword argument. If you want the index name to get set automatically, you could implement init() in the News class and call super().init(...) in your implementation.
A simplified example (python 3.x):
from elasticsearch_dsl import Document
from elasticsearch_dsl.connections import connections
import datetime

class News(Document):

    @classmethod
    def init(cls, index=None, using=None):
        index_name = index or 'processed_news_' + datetime.datetime.now().strftime("%Y%m%d")
        return super().init(index=index_name, using=using)
You can override the index when you call save():
new_insert.save(index='processed_news_' + datetime.datetime.now().strftime("%Y%m%d"))
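Putting the two ideas together, a usage sketch (index name as in the question; the document id and title are placeholders) might look like this:

import datetime
from elasticsearch_dsl.connections import connections
from custom_indices import News

connections.create_connection(hosts=['localhost'])
index_name = 'processed_news_' + datetime.datetime.now().strftime("%Y%m%d")

News.init(index=index_name)                 # create the mapping for today's index
doc = News(meta={'id': 'some-id'},          # 'some-id' is just a placeholder
           title='Example headline')
doc.save(index=index_name)                  # write the document to today's index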
A fuller example follows.
# coding: utf-8
import datetime

from elasticsearch_dsl import Keyword, Text, Index, Document, Date
from elasticsearch_dsl.connections import connections

HOST = "localhost:9200"

index_names = [
    "foo-log-",
    "bar-log-",
]

default_settings = {"number_of_shards": 4, "number_of_replicas": 1}
index_settings = {
    "foo-log-": {
        "number_of_shards": 40,
        "number_of_replicas": 1
    }
}


class LogDoc(Document):
    level = Keyword(ignore_above=256)
    date = Date(format="yyyy-MM-dd'T'HH:mm:ss.SSS")
    hostname = Text(fields={'fields': Keyword(ignore_above=256)})
    message = Text()
    createTime = Date(format="yyyy-MM-dd'T'HH:mm:ss.SSS")


def auto_create_index():
    '''Automatically create the ES indices.'''
    connections.create_connection(hosts=[HOST])
    for day in range(3):
        dt = datetime.datetime.now() + datetime.timedelta(days=day)
        for index in index_names:
            name = index + dt.strftime("%Y-%m-%d")
            settings = index_settings.get(index, default_settings)
            idx = Index(name=name)
            idx.document(LogDoc)
            idx.settings(**settings)
            try:
                idx.create()
            except Exception as e:
                print(e)
                continue
            print("create index %s" % name)


if __name__ == '__main__':
    auto_create_index()

How to JSON dump to a rotating file object

I'm writing a program which periodically dumps old data from a RethinkDB database into a file and removes it from the database. Currently, the data is dumped into a single file which grows without limit. I'd like to change this so that the maximum file size is, say, 250 Mb, and the program starts to write to a new output file just before this size is exceeded.
It seems like Python's RotatingFileHandler class for loggers does approximately what I want; however, I'm not sure whether logging can be applied to any JSON-dumpable object or just to strings.
Another possible approach would be to use (a variant of) Mike Pennington's RotatingFile class (see python: outfile to another text file if exceed certain file size).
Which of these approaches is likely to be the most fruitful?
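To make the logging idea concrete: a RotatingFileHandler only ever receives formatted log records (strings), so a JSON-dumpable object has to be serialized first, e.g. with json.dumps. A minimal sketch of that pattern (with illustrative names, not my actual program) looks roughly like this:

import json
import logging
from logging.handlers import RotatingFileHandler

archive_logger = logging.getLogger("archive")
archive_logger.setLevel(logging.INFO)
# Roll over to a new file just before 250 MB is exceeded, keeping 10 old files
handler = RotatingFileHandler("archive.json", maxBytes=250 * 1024 * 1024, backupCount=10)
handler.setFormatter(logging.Formatter("%(message)s"))  # write the raw line only
archive_logger.addHandler(handler)

record = {"id": 1, "timestamp": "2016-11-01T12:00:00Z"}  # stand-in for a RethinkDB document
archive_logger.info(json.dumps(record))                  # one JSON document per line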
For reference, my current program is as follows:
import os
import sys
import json
import rethinkdb as r
import pytz
from datetime import datetime, timedelta
import schedule
import time
import functools
from iclib import RethinkDB
import msgpack

''' The purpose of the Controller is to periodically archive data from the "sensor_data" table so that it does not grow without limit.'''

class Controller(RethinkDB):
    def __init__(self, db_address=(os.environ['DB_ADDR'], int(os.environ['DB_PORT'])), db_name=os.environ['DB_NAME']):
        super(Controller, self).__init__(db_address=db_address, db_name=db_name)  # Initialize the IperCronComponent with the default logger name (in this case, "Controller")
        self.db_table = RethinkDB.SENSOR_DATA_TABLE  # The table name is "sensor_data" and is stored as a class variable in RethinkDBMixIn

    def generate_archiving_query(self, retention_period=timedelta(days=3)):
        expiry_time = r.now() - retention_period.total_seconds()  # Timestamp before which data is to be archived
        if "timestamp" in r.table(self.db_table).index_list().run(self.db):  # If "timestamp" is a secondary index
            beginning_of_time = r.time(1400, 1, 1, 'Z')  # The minimum time of a ReQL time object (i.e., the year 1400 in the UTC timezone)
            data_to_archive = r.table(self.db_table).between(beginning_of_time, expiry_time, index="timestamp")  # Generate query using "between" (faster)
        else:
            data_to_archive = r.table(self.db_table).filter(r.row['timestamp'] < expiry_time)  # Generate the same query using "filter" (slower, but does not require "timestamp" to be a secondary index)
        return data_to_archive

    def archiving_job(self, data_to_archive=None, output_file="archived_sensor_data.json"):
        if data_to_archive is None:
            data_to_archive = self.generate_archiving_query()  # By default, call "generate_archiving_query" to generate the query
        old_data = data_to_archive.run(self.db, time_format="raw")  # Without time_format="raw" the output does not dump to JSON
        with open(output_file, 'a') as f:
            ids_to_delete = []
            for item in old_data:
                print item
                # msgpack.dump(item, f)
                json.dump(item, f)
                f.write('\n')  # Separate each document by a new line
                ids_to_delete.append(item['id'])
        r.table(self.db_table).get_all(r.args(ids_to_delete)).delete().run(self.db)  # Delete based on ID. It is preferred to delete the entire batch in a single operation rather than to delete the documents one by one in the for loop.

def test_job_1():
    db_name = "ipercron"
    table_name = "sensor_data"
    port_offset = 1  # To avoid interference of this testing program with the main program, all ports are initialized at an offset of 1 from the default ports using "rethinkdb --port_offset 1" at the command line.
    conn = r.connect("localhost", 28015 + port_offset)
    r.db(db_name).table(table_name).delete().run(conn)
    import rethinkdb_add_data
    controller = Controller(db_address=("localhost", 28015 + port_offset))
    archiving_job = functools.partial(controller.archiving_job, data_to_archive=controller.generate_archiving_query())
    return archiving_job

if __name__ == "__main__":
    archiving_job = test_job_1()
    schedule.every(0.1).minutes.do(archiving_job)
    while True:
        schedule.run_pending()
It is not completely 'runnable' from the part shown, but the key point is that I would like to replace the line
json.dump(item, f)
with a similar line in which f is a rotating, and not fixed, file object.
Following Stanislav Ivanov, I used json.dumps to convert each RethinkDB document to a string and wrote this to a RotatingFileHandler:
import os
import sys
import json
import rethinkdb as r
import pytz
from datetime import datetime, timedelta
import schedule
import time
import functools
from iclib import RethinkDB
import msgpack
import logging
from logging.handlers import RotatingFileHandler
from random_data_generator import RandomDataGenerator

''' The purpose of the Controller is to periodically archive data from the "sensor_data" table so that it does not grow without limit.'''

os.environ['DB_ADDR'] = 'localhost'
os.environ['DB_PORT'] = '28015'
os.environ['DB_NAME'] = 'ipercron'

class Controller(RethinkDB):
    def __init__(self, db_address=None, db_name=None):
        if db_address is None:
            db_address = (os.environ['DB_ADDR'], int(os.environ['DB_PORT']))  # The default host ("rethinkdb") and port (28015) are stored as environment variables
        if db_name is None:
            db_name = os.environ['DB_NAME']  # The default database is "ipercron" and is stored as an environment variable
        super(Controller, self).__init__(db_address=db_address, db_name=db_name)  # Initialize the instance of the RethinkDB class. IperCronComponent will be initialized with its default logger name (in this case, "Controller")
        self.db_name = db_name
        self.db_table = RethinkDB.SENSOR_DATA_TABLE  # The table name is "sensor_data" and is stored as a class variable of RethinkDBMixIn
        self.table = r.db(self.db_name).table(self.db_table)
        self.archiving_logger = logging.getLogger("archiving_logger")
        self.archiving_logger.setLevel(logging.DEBUG)
        self.archiving_handler = RotatingFileHandler("archived_sensor_data.log", maxBytes=2000, backupCount=10)
        self.archiving_logger.addHandler(self.archiving_handler)

    def generate_archiving_query(self, retention_period=timedelta(days=3)):
        expiry_time = r.now() - retention_period.total_seconds()  # Timestamp before which data is to be archived
        if "timestamp" in self.table.index_list().run(self.db):
            beginning_of_time = r.time(1400, 1, 1, 'Z')  # The minimum time of a ReQL time object (namely, the year 1400 in UTC)
            data_to_archive = self.table.between(beginning_of_time, expiry_time, index="timestamp")  # Generate query using "between" (faster, requires "timestamp" to be a secondary index)
        else:
            data_to_archive = self.table.filter(r.row['timestamp'] < expiry_time)  # Generate query using "filter" (slower, but does not require "timestamp" to be a secondary index)
        return data_to_archive

    def archiving_job(self, data_to_archive=None):
        if data_to_archive is None:
            data_to_archive = self.generate_archiving_query()  # By default, call "generate_archiving_query" to generate the query
        old_data = data_to_archive.run(self.db, time_format="raw")  # Without time_format="raw" the output does not dump to JSON or msgpack
        ids_to_delete = []
        for item in old_data:
            print item
            self.dump(item)
            ids_to_delete.append(item['id'])
        self.table.get_all(r.args(ids_to_delete)).delete().run(self.db)  # Delete based on ID. It is preferred to delete the entire batch in a single operation rather than to delete the documents one by one in the for loop.

    def dump(self, item, mode='json'):
        if mode == 'json':
            dump_string = json.dumps(item)
        elif mode == 'msgpack':
            dump_string = msgpack.packb(item)
        self.archiving_logger.debug(dump_string)

def populate_database(db_name, table_name, conn):
    if db_name not in r.db_list().run(conn):
        r.db_create(db_name).run(conn)  # Create the database if it does not yet exist
    if table_name not in r.db(db_name).table_list().run(conn):
        r.db(db_name).table_create(table_name).run(conn)  # Create the table if it does not yet exist
    r.db(db_name).table(table_name).delete().run(conn)  # Empty the table to start with a clean slate
    # Generate random data with timestamps uniformly distributed over the past 6 days
    random_data_time_interval = timedelta(days=6)
    start_random_data = datetime.utcnow().replace(tzinfo=pytz.utc) - random_data_time_interval
    random_generator = RandomDataGenerator(seed=0)
    packets = random_generator.packets(N=100, start=start_random_data)
    # print packets
    print "Adding data to the database..."
    r.db(db_name).table(table_name).insert(packets).run(conn)

if __name__ == "__main__":
    db_name = "ipercron"
    table_name = "sensor_data"
    port_offset = 1  # To avoid interference of this testing program with the main program, all ports are initialized at an offset of 1 from the default ports using "rethinkdb --port_offset 1" at the command line.
    host = "localhost"
    port = 28015 + port_offset
    conn = r.connect(host, port)  # RethinkDB connection object
    populate_database(db_name, table_name, conn)
    # import rethinkdb_add_data
    controller = Controller(db_address=(host, port))
    archiving_job = functools.partial(controller.archiving_job, data_to_archive=controller.generate_archiving_query())  # This ensures that the query is only generated once. (This is sufficient since r.now() is re-evaluated every time a connection is made.)
    schedule.every(0.1).minutes.do(archiving_job)
    while True:
        schedule.run_pending()
In this context the RethinkDB class does little other than define the class variable SENSOR_DATA_TABLE and the RethinkDB connection, self.db = r.connect(self.address[0], self.address[1]). This is run together with a module for generating fake data, random_data_generator.py:
import random
import faker
from datetime import datetime, timedelta
import pytz
import rethinkdb as r

class RandomDataGenerator(object):
    def __init__(self, seed=None):
        self._seed = seed
        self._random = random.Random()
        self._random.seed(seed)
        self.fake = faker.Faker()
        self.fake.random.seed(seed)

    def __getattr__(self, x):
        return getattr(self._random, x)

    def name(self):
        return self.fake.name()

    def datetime(self, start=None, end=None):
        if start is None:
            start = datetime(2000, 1, 1, tzinfo=pytz.utc)  # Jan 1st 2000
        if end is None:
            end = datetime.utcnow().replace(tzinfo=pytz.utc)
        if isinstance(end, datetime):
            dt = end - start
        elif isinstance(end, timedelta):
            dt = end
        assert isinstance(dt, timedelta)
        random_dt = timedelta(microseconds=self._random.randrange(int(dt.total_seconds() * (10 ** 6))))
        return start + random_dt

    def packets(self, N=1, start=None, end=None):
        return [{'name': self.name(), 'timestamp': self.datetime(start=start, end=end)} for _ in range(N)]
When I run the controller it produces several rolled-over output logs, each at most 2 kB in size, as expected.
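The RethinkDB base class from iclib is not shown here; a minimal sketch of what it is assumed to provide (only the attributes used above, with an illustrative table name) would be roughly:

import rethinkdb as r

class RethinkDB(object):
    SENSOR_DATA_TABLE = "sensor_data"  # class variable referenced by the Controller

    def __init__(self, db_address=("localhost", 28015), db_name="ipercron"):
        self.address = db_address
        self.db_name = db_name
        # The connection object used throughout the Controller as self.db
        self.db = r.connect(self.address[0], self.address[1], db=self.db_name)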

Python loop inserting last row only in cassandra

I typed up a small demo loop to insert random values into Cassandra, but only the last record is persisted to the database. I am using the cassandra-driver from DataStax and its object modeling library. The Cassandra version is 3.7 and Python is 3.4. Any idea what I am doing wrong?
#!/usr/bin/env python
import datetime
import uuid
from random import randint, uniform

from cassandra.cluster import Cluster
from cassandra.cqlengine import connection, columns
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.query import BatchQuery

class TestTable(Model):
    _table_name = 'test_table'
    key = columns.UUID(primary_key=True, default=uuid.uuid4())
    type = columns.Integer(index=True)
    value = columns.Float(required=False)
    created_time = columns.DateTime(default=datetime.datetime.now())

def main():
    connection.setup(['127.0.0.1'], 'test', protocol_version=3)
    sync_table(TestTable)
    for _ in range(10):
        type = randint(1, 3)
        value = uniform(-10, 10)
        row = TestTable.create(type=type, value=value)
        print("Inserted row: ", row.type, row.value)
    print("Done inserting")
    q = TestTable.objects.count()
    print("We have inserted " + str(q) + " rows.")

if __name__ == "__main__":
    main()
Many thanks!
The problem is in the definition of the key column:
key = columns.UUID(primary_key=True, default=uuid.uuid4())
For the default value it's going to call the uuid.uuid4 function once and use that result as the default for all future inserts. Because that's your primary key, all 10 writes will happen to the same primary key.
Instead, drop the parentheses so you are just passing a reference to uuid.uuid4 rather than calling it:
key = columns.UUID(primary_key=True, default=uuid.uuid4)
Now each time you create a row you'll get a new unique UUID value, and therefore a new row in Cassandra.
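The same pitfall, incidentally, applies to the created_time column in the model above: datetime.datetime.now() is evaluated once when the class is defined, so every row would share the same timestamp. Pass the callable itself so it is evaluated at insert time:

created_time = columns.DateTime(default=datetime.datetime.now)  # note: no parentheses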
You need to use the method save.
...
row = TestTable(type=type, value=value)
row.save()
...
http://cqlengine.readthedocs.io/en/latest/topics/models.html#cqlengine.models.Model.save

Jira python calculate time

I am trying to calculate the time from when an issue is created until it is resolved, using these fields:
creation_time = issue.fields.created
resolved_time = issue.fields.resolutiondate
Output when I print:
Creation: 2016-06-09T14:37:05.000+0200 Resolved: 2016-06-10T10:53:12.000+0200
Is there any way I can subtract the creation date and time from the resolution date and time to find how much time was spent on an issue?
Parse the date/time strings into a suitable datetime object and then you can use those to do calculations.
This post explains how to parse a date/time string or you can just take a look at the documentation for the strptime() method.
For calculations, there are examples in this post and there's detailed documentation here.
As an example, something like this should be close to a solution:
from datetime import datetime
from datetime import timedelta

# The trailing '+0200' offset is cut off first, since this format string cannot parse it
createdTime = datetime.strptime('2016-06-09T14:37:05.000+0200'[:-5], '%Y-%m-%dT%H:%M:%S.%f')
resolvedTime = datetime.strptime('2016-06-10T10:53:12.000+0200'[:-5], '%Y-%m-%dT%H:%M:%S.%f')
duration = resolvedTime - createdTime
duration will be a timedelta object and you can access duration.days, duration.seconds and duration.microseconds to get its info.
strptime does have a drawback here: this format string does not parse the timezone offset, so you have to cut that part of your input first, as above (in Python 3 you could append %z to the format instead). Alternatively, see this post.
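To report the result in a friendlier unit, total_seconds() is usually the most convenient starting point, for example:

hours = duration.total_seconds() / 3600
print("Resolved after %d days and %d seconds (%.1f hours in total)"
      % (duration.days, duration.seconds, hours))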
strptime will not parse the timezone part with this format, so I strip it off first. This code is working for me:
from datetime import datetime
createdTime = datetime.strptime(issue.fields.created.split(".")[0], '%Y-%m-%dT%H:%M:%S')
resolvedTime = datetime.strptime(issue.fields.resolutiondate.split(".")[0], '%Y-%m-%dT%H:%M:%S')
duration = resolvedTime - createdTime
I've written a function which calculates the mean, median and variance of the response times in days. Hope that helps:
import datetime as d
import numpy as np

def _parse(timestamp):
    '''Build a datetime (minute precision) from a Jira timestamp such as 2016-06-09T14:37:05.000+0200.'''
    date_part, time_part = timestamp.split('T')
    year, month, day = (int(x) for x in date_part.split('-'))
    hour, minute = (int(x) for x in time_part.split(':')[:2])
    return d.datetime(year, month, day, hour, minute)

class calculate:
    def __init__(self):
        self.result = 0

    def _delta_days(self, issueobject):
        # Number of days between creation and resolution for each issue
        delta_t = []
        for issue in issueobject:
            created = _parse(issue.raw[u'fields'][u'created'])
            resolved = _parse(issue.raw[u'fields'][u'resolutiondate'])
            delta_t.append((resolved - created).days)
        return np.array(delta_t)

    def meantime(self, issueobject):
        self.result = np.mean(self._delta_days(issueobject))
        return self.result

    def mediantime(self, issueobject):
        self.result = np.median(self._delta_days(issueobject))
        return self.result

    def variancetime(self, issueobject):
        self.result = np.var(self._delta_days(issueobject))
        return self.result
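Usage would look roughly like this (a sketch: the server URL, credentials and JQL are placeholders, and the query is assumed to return only resolved issues):

from jira import JIRA

jira = JIRA(server='https://jira.example.com', basic_auth=('user', 'password'))
issues = jira.search_issues('project = DEMO AND resolution IS NOT EMPTY', maxResults=200)

calc = calculate()
print("mean days to resolve:", calc.meantime(issues))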
