Count occurrences of Enum in a string - python

I am attempting to count the number of occurrences of an Enum member's value in a string, e.g.:

from enum import Enum

class numbers(Enum):
    one = 1
    two = 2

string = "121212123324"
string.count(str(numbers.one.value))

Converting the enum value back to a string just seems very unintuitive; are there any quicker ways?

Your solution is good. You can see the runtimes of five approaches below:
from timeit import timeit
from collections import Counter
from enum import Enum

class numbers(Enum):
    one = 1
    two = 2
    three = 3
    four = 4

def approach1(products):
    return Counter(products)[str(numbers.one.value)]

def approach2(products):
    return products.count(str(numbers.one.value))

def approach3(products):
    lst = list(map(int, products))
    return lst.count(int(numbers.one.value))

def approach4(products):
    cnt = Counter(products)
    return (cnt[str(numbers.one.value)], cnt[str(numbers.two.value)],
            cnt[str(numbers.three.value)], cnt[str(numbers.four.value)])

def approach5(products):
    cnt_o = products.count(str(numbers.one.value))
    cnt_t = products.count(str(numbers.two.value))
    cnt_h = products.count(str(numbers.three.value))
    cnt_f = products.count(str(numbers.four.value))
    return (cnt_o, cnt_t, cnt_h, cnt_f)

funcs = approach1, approach2, approach3, approach4, approach5
products = "121212123324" * 10000000

for _ in range(3):
    for func in funcs:
        t = timeit(lambda: func(products), number=1)
        print('%.3f s ' % t, func.__name__)
    print()
Output:
6.279 s approach1
0.140 s approach2
17.172 s approach3
6.403 s approach4
0.491 s approach5
6.340 s approach1
0.139 s approach2
16.049 s approach3
6.559 s approach4
0.474 s approach5
6.245 s approach1
0.143 s approach2
15.876 s approach3
6.172 s approach4
0.475 s approach5
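If you control the enum definition, one way to avoid the str() round-trip entirely (a sketch of an alternative, not part of the answer above) is a str mixin, so members can be passed to str.count directly:

from enum import Enum

class Numbers(str, Enum):
    # hypothetical variant that stores each digit as a string
    one = "1"
    two = "2"

string = "121212123324"
print(string.count(Numbers.one))  # 4; str-mixin members behave as their value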


Recursive relations search between 2 columns in a table [Using python list / Dict]

I am trying to optimize a solution that I created to find recursive relations between 2 columns in a table. I need to find all accIDs for a bssID and recursively find all the bssIDs for those accIDs and so on till I find all the related bssIDs.
bssIDs    accIDs
ABC       4424
ABC       56424
ABC       2383
A100BC    2383
A100BC    4943
A100BC    4880
A100BC    6325
A100BC    4424
XYZ       123
The below solution works for an initial table of 100K rows, but it runs for more than 16 hours on a dataset of 20 million rows. I am trying to use dicts instead of lists, but I am unable to change a dict while iterating over it, the way I can with a list.
import time

accIds = {4880: ['A100BC'], 6325: ['A100BC'], 2383: ['A100BC','ABC'], 4424: ['A100BC','ABC'], 4943: ['A100BC'], 56424: ['ABC'], 123: ['XYZ']}
bssIds = {'ABC': [4424,56424,2383], 'A100BC': [2383,4943,4880,6325,4424], 'XYZ': [123]}

def findBIDs(aID):
    return accIds[aID]

def findAIDs(bID):
    return bssIds[bID]

def getList(Ids):
    return Ids.keys()

def checkList(inputList, value):
    return (value in inputList)

def addToList(inputList, value):
    return inputList.append(value)

def removeFromList(inputList, value):
    return inputList.remove(value)

aIDlist = list(getList(accIds))
bIDlist = list(getList(bssIds))
bRelations = {}
runningList = list()

for x in bIDlist:
    if not checkList(runningList, x):
        aList = list()
        bList = list()
        addToList(bList, x)
        for y in bList:
            for c in findAIDs(y):
                if not checkList(aList, c):
                    addToList(aList, c)
            for z in aList:
                for a in findBIDs(z):
                    if not checkList(bList, a):
                        addToList(bList, a)
        bRelations.update({time.time_ns(): bList})
        runningList.extend(bList)

print(bRelations)
Output : {1652374114032173632: ['ABC', 'A100BC'], 1652374114032180888: ['XYZ']}
Please suggest whether there is a way to update a dict while iterating over it, or whether a recursive solution can be applied here.
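On the literal sub-question: you cannot add keys to a dict while iterating over it directly, but you can iterate over a snapshot of its keys, or drive the loop with a separate worklist (the answer below does the latter). A minimal sketch of the snapshot idea, with hypothetical data:

d = {'a': 1}
for k in list(d):           # snapshot of the current keys
    d[k + k] = d[k] + 1     # safe: we mutate d, not the snapshot
print(d)                    # {'a': 1, 'aa': 2}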
This is the fastest I could think of:
accIds = {4880: frozenset(['A100BC']), 6325: frozenset(['A100BC']), 2383: frozenset(['A100BC','ABC']), 4424: frozenset(['A100BC','ABC']), 4943: frozenset(['A100BC']), 56424: frozenset(['ABC']), 123: frozenset(['XYZ'])}
bssIds = {'ABC': frozenset([4424,56424,2383]), 'A100BC': frozenset([2383,4943,4880,6325,4424]), 'XYZ': frozenset([123])}

def search_bssid(bssId):
    traversed_accIds = set()
    traversed_bssIds = {bssId}
    accIds_to_check = []
    bssIds_to_check = [bssId]
    while bssIds_to_check:
        bssId = bssIds_to_check.pop()
        new_accids = bssIds[bssId] - traversed_accIds
        traversed_accIds.update(new_accids)
        accIds_to_check.extend(new_accids)
        while accIds_to_check:
            accId = accIds_to_check.pop()
            new_bssids = accIds[accId] - traversed_bssIds
            traversed_bssIds.update(new_bssids)
            bssIds_to_check.extend(new_bssids)
    return traversed_bssIds

print(search_bssid("ABC"))
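To rebuild the original grouping over the whole table, search_bssid can be driven from a loop over the keys; a short sketch (my addition, not part of the answer above):

groups = []
seen = set()
for bid in bssIds:
    if bid not in seen:
        group = search_bssid(bid)
        groups.append(group)
        seen.update(group)
print(groups)  # e.g. [{'ABC', 'A100BC'}, {'XYZ'}]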

Size of file, human readable [duplicate]

I want a function that returns a human-readable size given a size in bytes:
>>> human_readable(2048)
'2 kilobytes'
>>>
How can I do this?
Addressing the above "too small a task to require a library" issue by a straightforward implementation (using f-strings, so Python 3.6+):
def sizeof_fmt(num, suffix="B"):
    for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
        if abs(num) < 1024.0:
            return f"{num:3.1f}{unit}{suffix}"
        num /= 1024.0
    return f"{num:.1f}Yi{suffix}"
Supports:
all currently known binary prefixes
negative and positive numbers
numbers larger than 1000 Yobibytes
arbitrary units (maybe you like to count in Gibibits!)
Example:
>>> sizeof_fmt(168963795964)
'157.4GiB'
by Fred Cirera
A library that has all the functionality you're looking for is humanize; humanize.naturalsize() does everything you need.
Example code (Python 3.10):

import humanize

disk_sizes_list = [1, 100, 999, 1000, 1024, 2000, 2048, 3000, 9999, 10000, 2048000000, 9990000000, 9000000000000000000000]
for size in disk_sizes_list:
    natural_size = humanize.naturalsize(size)
    binary_size = humanize.naturalsize(size, binary=True)
    print(f" {natural_size} \t| {binary_size}\t|{size}")
Output
1 Byte | 1 Byte |1
100 Bytes | 100 Bytes |100
999 Bytes | 999 Bytes |999
1.0 kB | 1000 Bytes |1000
1.0 kB | 1.0 KiB |1024
2.0 kB | 2.0 KiB |2000
2.0 kB | 2.0 KiB |2048
3.0 kB | 2.9 KiB |3000
10.0 kB | 9.8 KiB |9999
10.0 kB | 9.8 KiB |10000
2.0 GB | 1.9 GiB |2048000000
10.0 GB | 9.3 GiB |9990000000
9.0 ZB | 7.6 ZiB |9000000000000000000000
The following works in Python 3.6+, is, in my opinion, the easiest answer on here to understand, and lets you customize the number of decimal places used.
def human_readable_size(size, decimal_places=2):
    for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']:
        if size < 1024.0 or unit == 'PiB':
            break
        size /= 1024.0
    return f"{size:.{decimal_places}f} {unit}"
There's always got to be one of those guys. Well today it's me. Here's a one-liner -- or two lines if you count the function signature.
def human_size(bytes, units=[' bytes','KB','MB','GB','TB', 'PB', 'EB']):
    """ Returns a human readable string representation of bytes """
    return str(bytes) + units[0] if bytes < 1024 else human_size(bytes>>10, units[1:])

>>> human_size(123)
'123 bytes'
>>> human_size(123456789)
'117MB'
If you need sizes bigger than an Exabyte, it's a little bit more gnarly:
def human_size(bytes, units=[' bytes','KB','MB','GB','TB', 'PB', 'EB']):
    return str(bytes) + units[0] if bytes < 1024 else human_size(bytes>>10, units[1:]) if units[1:] else f'{bytes>>10}ZB'
Here's my version. It does not use a for-loop. It has constant complexity, O(1), and is in theory more efficient than the answers here that use a for-loop.
from math import log

# list() so it can be indexed on Python 3, where zip returns an iterator
unit_list = list(zip(['bytes', 'kB', 'MB', 'GB', 'TB', 'PB'], [0, 0, 1, 2, 2, 2]))

def sizeof_fmt(num):
    """Human friendly file size"""
    if num > 1:
        exponent = min(int(log(num, 1024)), len(unit_list) - 1)
        quotient = float(num) / 1024**exponent
        unit, num_decimals = unit_list[exponent]
        format_string = '{:.%sf} {}' % (num_decimals)
        return format_string.format(quotient, unit)
    if num == 0:
        return '0 bytes'
    if num == 1:
        return '1 byte'
To make it more clear what is going on, we can omit the code for the string formatting. Here are the lines that actually do the work:
exponent = int(log(num, 1024))
quotient = num / 1024**exponent
unit_list[exponent]
I recently came up with a version that avoids loops, using log2 to determine the size order which doubles as a shift and an index into the suffix list:
from math import log2

_suffixes = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']

def file_size(size):
    # determine binary order in steps of size 10
    # (coerce to int, // still returns a float)
    order = int(log2(size) / 10) if size else 0
    # format file size
    # (.4g results in rounded numbers for exact matches and max 3 decimals,
    # should never resort to exponent values)
    return '{:.4g} {}'.format(size / (1 << (order * 10)), _suffixes[order])
Could well be considered unpythonic for its readability, though.
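For instance (computed with the function above):

>>> file_size(123456789)
'117.7 MiB'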
If you're using Django, you can also try filesizeformat:
from django.template.defaultfilters import filesizeformat
filesizeformat(1073741824)
=>
"1.0 GB"
You should use "humanize".
>>> humanize.naturalsize(1000000)
'1.0 MB'
>>> humanize.naturalsize(1000000, binary=True)
'976.6 KiB'
>>> humanize.naturalsize(1000000, gnu=True)
'976.6K'
Reference:
https://pypi.org/project/humanize/
One such library is hurry.filesize.
>>> from hurry.filesize import size, alternative
>>> size(1, system=alternative)
'1 byte'
>>> size(10, system=alternative)
'10 bytes'
>>> size(1024, system=alternative)
'1 KB'
Using either powers of 1000 or kibibytes would be more standard-friendly:
def sizeof_fmt(num, use_kibibyte=True):
    base, suffix = [(1000., 'B'), (1024., 'iB')][use_kibibyte]
    # list comprehension, so this also works on Python 3
    for x in ['B'] + [s + suffix for s in 'kMGTP']:
        if -base < num < base:
            return "%3.1f %s" % (num, x)
        num /= base
    return "%3.1f %s" % (num, x)
P.S. Never trust a library that prints thousands with the K (uppercase) suffix :)
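A couple of calls showing both modes (values computed with the code above):

>>> sizeof_fmt(123456789)
'117.7 MiB'
>>> sizeof_fmt(123456789, use_kibibyte=False)
'123.5 MB'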
The HumanFriendly project helps with this.
import humanfriendly
humanfriendly.format_size(1024)
The above code will give 1KB as the answer.
Examples can be found here.
Riffing on the snippet provided as an alternative to hurry.filesize(), here is a snippet that gives varying precision numbers based on the prefix used. It isn't as terse as some snippets, but I like the results.
def human_size(size_bytes):
    """
    format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
    Note that bytes/KB will be reported in whole numbers but MB and above will have greater precision
    e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc
    """
    if size_bytes == 1:
        # because I really hate unnecessary plurals
        return "1 byte"

    suffixes_table = [('bytes', 0), ('KB', 0), ('MB', 1), ('GB', 2), ('TB', 2), ('PB', 2)]

    num = float(size_bytes)
    for suffix, precision in suffixes_table:
        if num < 1024.0:
            break
        num /= 1024.0

    if precision == 0:
        formatted_size = "%d" % num
    else:
        formatted_size = str(round(num, ndigits=precision))

    return "%s %s" % (formatted_size, suffix)
This will do what you need in almost any situation, is customizable with optional arguments, and as you can see, is pretty much self-documenting:
from math import log

def pretty_size(n, pow=0, b=1024, u='B', pre=[''] + [p + 'i' for p in 'KMGTPEZY']):
    pow, n = min(int(log(max(n * b**pow, 1), b)), len(pre) - 1), n * b**pow
    return "%%.%if %%s%%s" % abs(pow % (-pow - 1)) % (n / b**float(pow), pre[pow], u)
Example output:
>>> pretty_size(42)
'42 B'
>>> pretty_size(2015)
'2.0 KiB'
>>> pretty_size(987654321)
'941.9 MiB'
>>> pretty_size(9876543210)
'9.2 GiB'
>>> pretty_size(0.5,pow=1)
'512 B'
>>> pretty_size(0)
'0 B'
Advanced customizations:
>>> pretty_size(987654321,b=1000,u='bytes',pre=['','kilo','mega','giga'])
'987.7 megabytes'
>>> pretty_size(9876543210,b=1000,u='bytes',pre=['','kilo','mega','giga'])
'9.9 gigabytes'
This code is both Python 2 and Python 3 compatible. PEP8 compliance is an exercise for the reader. Remember, it's the output that's pretty.
Update:
If you need thousands commas, just apply the obvious extension:
def prettier_size(n, pow=0, b=1024, u='B', pre=[''] + [p + 'i' for p in 'KMGTPEZY']):
    r, f = min(int(log(max(n * b**pow, 1), b)), len(pre) - 1), '{:,.%if} %s%s'
    return (f % (abs(r % (-r - 1)), pre[r], u)).format(n * b**pow / b**float(r))
For example:
>>> prettier_size(987654321098765432109876543210)
'816,968.5 YiB'
Drawing from all the previous answers, here is my take on it. It's an object which will store the file size in bytes as an integer. But when you try to print the object, you automatically get a human readable version.
class Filesize(object):
    """
    Container for a size in bytes with a human readable representation

    Use it like this::

        >>> size = Filesize(123123123)
        >>> print size
        '117.4 MB'
    """
    chunk = 1024
    units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB']
    precisions = [0, 0, 1, 2, 2, 2]

    def __init__(self, size):
        self.size = size

    def __int__(self):
        return self.size

    def __str__(self):
        if self.size == 0: return '0 bytes'
        from math import log
        unit = self.units[min(int(log(self.size, self.chunk)), len(self.units) - 1)]
        return self.format(unit)

    def format(self, unit):
        if unit not in self.units: raise Exception("Not a valid file size unit: %s" % unit)
        if self.size == 1 and unit == 'bytes': return '1 byte'
        exponent = self.units.index(unit)
        quotient = float(self.size) / self.chunk**exponent
        precision = self.precisions[exponent]
        format_string = '{:.%sf} {}' % (precision)
        return format_string.format(quotient, unit)
Modern Django has its own template filter filesizeformat:
Formats the value like a human-readable file size (i.e. '13 KB', '4.1 MB', '102 bytes', etc.).
For example:
{{ value|filesizeformat }}
If value is 123456789, the output would be 117.7 MB.
More info: https://docs.djangoproject.com/en/1.10/ref/templates/builtins/#filesizeformat
I like the fixed precision of senderle's decimal version, so here's a sort of hybrid of that with joctee's answer above (did you know you could take logs with non-integer bases?):
from math import log

def human_readable_bytes(x):
    # hybrid of https://stackoverflow.com/a/10171475/2595465
    # with https://stackoverflow.com/a/5414105/2595465
    if x == 0: return '0'
    magnitude = int(log(abs(x), 10.24))
    if magnitude > 16:
        format_str = '%iP'
        illion = 5  # clamp to the largest suffix
    else:
        float_fmt = '%2.1f' if magnitude % 3 == 1 else '%1.2f'
        illion = (magnitude + 1) // 3
        format_str = float_fmt + ['', 'K', 'M', 'G', 'T', 'P'][illion]
    return (format_str % (x * 1.0 / (1024 ** illion))).lstrip('0')
To get the file size in a human readable form, I created this function:
import os

def get_size(path):
    size = os.path.getsize(path)
    if size < 1024:
        return f"{size} bytes"
    elif size < pow(1024, 2):
        return f"{round(size/1024, 2)} KB"
    elif size < pow(1024, 3):
        return f"{round(size/(pow(1024, 2)), 2)} MB"
    elif size < pow(1024, 4):
        return f"{round(size/(pow(1024, 3)), 2)} GB"

>>> get_size("a.txt")
'1.4 KB'
Here is a one-liner lambda, without any imports, that converts bytes to a human-readable file size. Pass the value in bytes.
to_human = lambda v : str(v >> ((max(v.bit_length()-1, 0)//10)*10)) +["", "K", "M", "G", "T", "P", "E"][max(v.bit_length()-1, 0)//10]
>>> to_human(1024)
'1K'
>>> to_human(1024*1024*3)
'3M'
How about a simple 2-liner:

import math

def humanizeFileSize(filesize):
    p = int(math.floor(math.log(filesize, 2)/10))
    return "%.3f%s" % (filesize/math.pow(1024, p), ['B','KiB','MiB','GiB','TiB','PiB','EiB','ZiB','YiB'][p])
Here is how it works under the hood:
Calculates log2(filesize)
Divides it by 10 to get the closest unit (e.g. if the size is 5000 bytes, the closest unit is KiB, so the answer should be X KiB)
Returns file_size/value_of_closest_unit along with unit.
It however doesn't work if filesize is 0 or negative (because log is undefined for 0 and negative numbers). You can add extra checks for them:
def humanizeFileSize(filesize):
    filesize = abs(filesize)
    if (filesize == 0):
        return "0 Bytes"
    p = int(math.floor(math.log(filesize, 2)/10))
    return "%0.2f %s" % (filesize/math.pow(1024, p), ['Bytes','KiB','MiB','GiB','TiB','PiB','EiB','ZiB','YiB'][p])
Examples:
>>> humanizeFileSize(538244835492574234)
'478.06 PiB'
>>> humanizeFileSize(-924372537)
'881.55 MiB'
>>> humanizeFileSize(0)
'0 Bytes'
NOTE - There is a difference between KB and KiB. KB means 1000 bytes, whereas KiB means 1024 bytes. KB, MB, GB are all multiples of 1000, whereas KiB, MiB, GiB etc. are all multiples of 1024. More about it here
What you're about to find below is by no means the most performant or shortest solution among the ones already posted. Instead, it focuses on one particular issue that many of the other answers miss.
Namely the case when input like 999_995 is given:
Python 3.6.1 ...
...
>>> value = 999_995
>>> base = 1000
>>> math.log(value, base)
1.999999276174054
which, being truncated to the nearest integer and applied back to the input gives
>>> order = int(math.log(value, base))
>>> value/base**order
999.995
This seems to be exactly what we'd expect until we're required to control output precision. And this is when things start to get a bit difficult.
With the precision set to 2 digits we get:
>>> round(value/base**order, 2)
1000 # K
instead of 1M.
How can we counter that?
Of course, we can check for it explicitly:
if round(value/base**order, 2) == base:
    order += 1
But can we do better? Can we get to know which way the order should be cut before we do the final step?
It turns out we can.
Assuming the 0.5 decimal rounding rule, the above if condition translates into:

order - int(order) >= log(base - 0.5/10**precision, base)

resulting in
from math import log

def abbreviate(value, base=1000, precision=2, suffixes=None):
    if suffixes is None:
        suffixes = ['', 'K', 'M', 'B', 'T']

    if value == 0:
        return f'{0}{suffixes[0]}'

    order_max = len(suffixes) - 1
    order = log(abs(value), base)
    order_corr = order - int(order) >= log(base - 0.5/10**precision, base)
    order = min(int(order) + order_corr, order_max)

    factored = round(value/base**order, precision)

    return f'{factored:,g}{suffixes[order]}'
giving
>>> abbreviate(999_994)
'999.99K'
>>> abbreviate(999_995)
'1M'
>>> abbreviate(999_995, precision=3)
'999.995K'
>>> abbreviate(2042, base=1024)
'1.99K'
>>> abbreviate(2043, base=1024)
'2K'
def human_readable_data_quantity(quantity, multiple=1024):
    if quantity == 0:
        quantity = +0
    SUFFIXES = ["B"] + [i + {1000: "B", 1024: "iB"}[multiple] for i in "KMGTPEZY"]
    for suffix in SUFFIXES:
        if quantity < multiple or suffix == SUFFIXES[-1]:
            if suffix == SUFFIXES[0]:
                return "%d%s" % (quantity, suffix)
            else:
                return "%.1f%s" % (quantity, suffix)
        else:
            quantity /= multiple
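The multiple parameter selects binary or decimal units; for example (computed with the function above):

>>> human_readable_data_quantity(123456789)
'117.7MiB'
>>> human_readable_data_quantity(123456789, 1000)
'123.5MB'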
This feature is available in Boltons, which is a very handy library to have for most projects.

>>> from boltons.strutils import bytes2human
>>> bytes2human(128991)
'126K'
>>> bytes2human(100001221)
'95M'
>>> bytes2human(0, 2)
'0.00B'
Here's something I wrote for a different question...
Much like xApple's answer, this object will always print in a human-readable format. The difference is that it's also a proper int, so you can do math with it!
It passes the format specifier straight through to the number format and tacks on the suffix, so it's pretty much guaranteed that the requested length will be exceeded by two or three characters. I've never had a use for this code, so I haven't bothered to fix it!
class ByteSize(int):

    _KB = 1024
    _suffixes = 'B', 'KB', 'MB', 'GB', 'TB', 'PB'

    def __new__(cls, *args, **kwargs):
        return super().__new__(cls, *args, **kwargs)

    def __init__(self, *args, **kwargs):
        self.bytes = self.B = int(self)
        self.kilobytes = self.KB = self / self._KB**1
        self.megabytes = self.MB = self / self._KB**2
        self.gigabytes = self.GB = self / self._KB**3
        self.terabytes = self.TB = self / self._KB**4
        self.petabytes = self.PB = self / self._KB**5
        # pick the first suffix whose value lands in (1, 1024), else the largest
        *suffixes, last = self._suffixes
        suffix = next((
            suffix
            for suffix in suffixes
            if 1 < getattr(self, suffix) < self._KB
        ), last)
        self.readable = suffix, getattr(self, suffix)

        super().__init__()

    def __str__(self):
        return self.__format__('.2f')

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, super().__repr__())

    def __format__(self, format_spec):
        suffix, val = self.readable
        return '{val:{fmt}} {suf}'.format(val=val, fmt=format_spec, suf=suffix)

    def __sub__(self, other):
        return self.__class__(super().__sub__(other))

    def __add__(self, other):
        return self.__class__(super().__add__(other))

    def __mul__(self, other):
        return self.__class__(super().__mul__(other))

    def __rsub__(self, other):
        return self.__class__(super().__rsub__(other))

    def __radd__(self, other):
        return self.__class__(super().__radd__(other))

    def __rmul__(self, other):
        return self.__class__(super().__rmul__(other))
Usage:
>>> size = ByteSize(6239397620)
>>> print(size)
5.81 GB
>>> size.GB
5.810891855508089
>>> size.gigabytes
5.810891855508089
>>> size.TB
0.005674699077644618
>>> size.MB
5950.353260040283
>>> size
ByteSize(6239397620)
In case someone is wondering, to convert Sridhar Ratnakumar's answer back to bytes you could do the following:
import math

def format_back_to_bytes(value):
    for power, unit in enumerate(["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]):
        if value[-3:-1] == unit:
            return round(float(value[:-3])*math.pow(2, 10*power))
Usage:
>>> format_back_to_bytes('212.4GiB')
228062763418
Here is an option using while:
def number_format(n):
    n2, n3 = n, 0
    while n2 >= 1e3:
        n2 /= 1e3
        n3 += 1
    return '%.3f' % n2 + ('', ' k', ' M', ' G')[n3]

s = number_format(9012345678)
print(s == '9.012 G')
s = number_format(9012345678)
print(s == '9.012 G')
https://docs.python.org/reference/compound_stmts.html#while
Referencing Sridhar Ratnakumar's answer, updated to:
def formatSize(sizeInBytes, decimalNum=1, isUnitWithI=False, sizeUnitSeperator=""):
    """format size to human readable string"""
    # https://en.wikipedia.org/wiki/Binary_prefix#Specific_units_of_IEC_60027-2_A.2_and_ISO.2FIEC_80000
    # K=kilo, M=mega, G=giga, T=tera, P=peta, E=exa, Z=zetta, Y=yotta
    sizeUnitList = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
    largestUnit = 'Y'

    if isUnitWithI:
        sizeUnitListWithI = []
        for curIdx, eachUnit in enumerate(sizeUnitList):
            unitWithI = eachUnit
            if curIdx >= 1:
                unitWithI += 'i'
            sizeUnitListWithI.append(unitWithI)
        # sizeUnitListWithI = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']
        sizeUnitList = sizeUnitListWithI
        largestUnit += 'i'

    suffix = "B"
    decimalFormat = "." + str(decimalNum) + "f"  # ".1f"
    finalFormat = "%" + decimalFormat + sizeUnitSeperator + "%s%s"  # "%.1f%s%s"
    sizeNum = sizeInBytes
    for sizeUnit in sizeUnitList:
        if abs(sizeNum) < 1024.0:
            return finalFormat % (sizeNum, sizeUnit, suffix)
        sizeNum /= 1024.0
    return finalFormat % (sizeNum, largestUnit, suffix)
and example output is:
def testKb():
    kbSize = 3746
    kbStr = formatSize(kbSize)
    print("%s -> %s" % (kbSize, kbStr))

def testI():
    iSize = 87533
    iStr = formatSize(iSize, isUnitWithI=True)
    print("%s -> %s" % (iSize, iStr))

def testSeparator():
    seperatorSize = 98654
    seperatorStr = formatSize(seperatorSize, sizeUnitSeperator=" ")
    print("%s -> %s" % (seperatorSize, seperatorStr))

def testBytes():
    bytesSize = 352
    bytesStr = formatSize(bytesSize)
    print("%s -> %s" % (bytesSize, bytesStr))

def testMb():
    mbSize = 76383285
    mbStr = formatSize(mbSize, decimalNum=2)
    print("%s -> %s" % (mbSize, mbStr))

def testTb():
    tbSize = 763832854988542
    tbStr = formatSize(tbSize, decimalNum=2)
    print("%s -> %s" % (tbSize, tbStr))

def testPb():
    pbSize = 763832854988542665
    pbStr = formatSize(pbSize, decimalNum=4)
    print("%s -> %s" % (pbSize, pbStr))

def demoFormatSize():
    testKb()
    testI()
    testSeparator()
    testBytes()
    testMb()
    testTb()
    testPb()
    # 3746 -> 3.7KB
    # 87533 -> 85.5KiB
    # 98654 -> 96.3 KB
    # 352 -> 352.0B
    # 76383285 -> 72.84MB
    # 763832854988542 -> 694.70TB
    # 763832854988542665 -> 678.4199PB
This solution might also appeal to you, depending on how your mind works:
from pathlib import Path

def get_size(path=Path('.')):
    """ Gets file size, or total directory size """
    if path.is_file():
        size = path.stat().st_size
    elif path.is_dir():
        size = sum(file.stat().st_size for file in path.glob('*.*'))
    return size

def format_size(path, unit="MB"):
    """ Converts integers to common size units used in computing """
    bit_shift = {"B": 0,
                 "kb": 7,
                 "KB": 10,
                 "mb": 17,
                 "MB": 20,
                 "gb": 27,
                 "GB": 30,
                 "TB": 40}
    return "{:,.0f}".format(get_size(path) / float(1 << bit_shift[unit])) + " " + unit

# Tests and test results
>>> format_size(Path("d:\\media\\bags of fun.avi"))
'38 MB'
>>> format_size(Path("d:\\media\\bags of fun.avi"), "KB")
'38,763 KB'
>>> format_size(Path("d:\\media\\bags of fun.avi"), "kb")
'310,104 kb'

The price of using infinite_defaultdict

This, for me, is a godsend:
>>> from collections import defaultdict
>>> infinite_defaultdict = lambda: defaultdict(infinite_defaultdict)
>>> d = infinite_defaultdict()
>>> d['x']['y']['z'] = 10
by Raymond Hettinger on Twitter
Having that, I don't see why we should do these anymore:
mydict = defaultdict(list)
mydict = defaultdict(lambda: defaultdict(float))
etc....
But I may be wrong.
Is there a case where you want to avoid infinite_defaultdict?
Update:
I tried to benchmark the time:
from collections import defaultdict

def infdd():
    infinite_defaultdict = lambda: defaultdict(infinite_defaultdict)
    idd = infinite_defaultdict()
    idd['x'] = [1, 2, 3]

def plaindd():
    ddl = defaultdict(list)
    ddl['x'] = [1, 2, 3]

if __name__ == '__main__':
    import timeit
    print "Infd = %.3f" % (timeit.timeit("infdd()", setup="from __main__ import infdd"))
    print "Plaind = %.3f" % (timeit.timeit("plaindd()", setup="from __main__ import plaindd"))
Apparently infinite_defaultdict is almost twice as slow as the plain one:
Infd = 0.632
Plaind = 0.387
If you need the default value to be something other than a dict, then you should not use infinite_defaultdict. For example, if you want to count items or accumulate arrays of items, you'll want the default value to be a number or an array.
def group_by(key, items):
    result = defaultdict(list)
    for item in items:
        result[key(item)].append(item)
    return result

group_by(len, ['here', 'are', 'some', 'words'])
# -> {3: ['are'], 4: ['here', 'some'], 5: ['words']}
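One more caveat worth knowing (my observation, not part of the answer above): with infinite_defaultdict even a plain lookup inserts missing keys, so reads mutate the structure and can corrupt membership tests:

from collections import defaultdict

infinite_defaultdict = lambda: defaultdict(infinite_defaultdict)
d = infinite_defaultdict()

if d['x']['y']:        # this lookup silently creates d['x'] and d['x']['y']
    pass
print('x' in d)        # True, even though nothing was ever assigned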

Parsing srt subtitles

I want to parse srt subtitles:
1
00:00:12,815 --> 00:00:14,509
Chlapi, jak to jde s
těma pracovníma světlama?.
2
00:00:14,815 --> 00:00:16,498
Trochu je zesilujeme.
3
00:00:16,934 --> 00:00:17,814
Jo, sleduj.
I want to parse every item into a structure, using these regexes:
A:
RE_ITEM = re.compile(r'(?P<index>\d+).'
                     r'(?P<start>\d{2}:\d{2}:\d{2},\d{3}) --> '
                     r'(?P<end>\d{2}:\d{2}:\d{2},\d{3}).'
                     r'(?P<text>.*?)', re.DOTALL)
B:
RE_ITEM = re.compile(r'(?P<index>\d+).'
                     r'(?P<start>\d{2}:\d{2}:\d{2},\d{3}) --> '
                     r'(?P<end>\d{2}:\d{2}:\d{2},\d{3}).'
                     r'(?P<text>.*)', re.DOTALL)
And this code:
for i in Subtitles.RE_ITEM.finditer(text):
    result.append((i.group('index'), i.group('start'),
                   i.group('end'), i.group('text')))
With pattern B I get only one item in the array (because of the greedy .*), and with pattern A the 'text' group is empty (because the non-greedy .*? matches as little as possible).
How can I fix this?
Thanks
Why not use pysrt?
I became quite frustrated with srt libraries available for Python (often because they were heavyweight and eschewed language-standard types in favour of custom classes), so I've spent the last year or so working on my own srt library. You can get it at https://github.com/cdown/srt.
I tried to keep it simple and light on classes (except for the core Subtitle class, which more or less just stores the SRT block data). It can read and write SRT files, and turn noncompliant SRT files into compliant ones.
Here's a usage example with your sample input:
>>> import srt, pprint
>>> gen = srt.parse('''\
... 1
... 00:00:12,815 --> 00:00:14,509
... Chlapi, jak to jde s
... těma pracovníma světlama?.
...
... 2
... 00:00:14,815 --> 00:00:16,498
... Trochu je zesilujeme.
...
... 3
... 00:00:16,934 --> 00:00:17,814
... Jo, sleduj.
...
... ''')
>>> pprint.pprint(list(gen))
[Subtitle(start=datetime.timedelta(0, 12, 815000), end=datetime.timedelta(0, 14, 509000), index=1, proprietary='', content='Chlapi, jak to jde s\ntěma pracovníma světlama?.'),
Subtitle(start=datetime.timedelta(0, 14, 815000), end=datetime.timedelta(0, 16, 498000), index=2, proprietary='', content='Trochu je zesilujeme.'),
Subtitle(start=datetime.timedelta(0, 16, 934000), end=datetime.timedelta(0, 17, 814000), index=3, proprietary='', content='Jo, sleduj.')]
The text is followed by an empty line, or the end of file. So you can use:
r' .... (?P<text>.*?)(\n\n|$)'
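A sketch of that suggestion dropped into the question's pattern A (my assembly; the answer gives only the tail of the pattern):

import re

RE_ITEM = re.compile(
    r'(?P<index>\d+)\s*\n'
    r'(?P<start>\d{2}:\d{2}:\d{2},\d{3}) --> '
    r'(?P<end>\d{2}:\d{2}:\d{2},\d{3})\s*\n'
    r'(?P<text>.*?)(?:\n\n|$)', re.DOTALL)

text = '''1
00:00:12,815 --> 00:00:14,509
Chlapi, jak to jde s
těma pracovníma světlama?.

2
00:00:14,815 --> 00:00:16,498
Trochu je zesilujeme.
'''

for m in RE_ITEM.finditer(text):
    print(m.group('index'), m.group('start'), m.group('end'), repr(m.group('text')))
# 1 00:00:12,815 00:00:14,509 'Chlapi, jak to jde s\ntěma pracovníma světlama?.'
# 2 00:00:14,815 00:00:16,498 'Trochu je zesilujeme.'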
Here's some code I had lying around to parse SRT files:
from __future__ import division
import datetime

class Srt_entry(object):
    def __init__(self, lines):
        def parsetime(string):
            hours, minutes, seconds = string.split(u':')
            hours = int(hours)
            minutes = int(minutes)
            seconds = float(u'.'.join(seconds.split(u',')))
            return datetime.timedelta(0, seconds, 0, 0, minutes, hours)
        self.index = int(lines[0])
        start, arrow, end = lines[1].split()
        self.start = parsetime(start)
        if arrow != u"-->":
            raise ValueError
        self.end = parsetime(end)
        self.lines = lines[2:]
        if not self.lines[-1]:
            del self.lines[-1]

    def __unicode__(self):
        def delta_to_string(d):
            hours = (d.days * 24) \
                    + (d.seconds // (60 * 60))
            minutes = (d.seconds // 60) % 60
            seconds = d.seconds % 60 + d.microseconds / 1000000
            return u','.join((u"%02d:%02d:%06.3f"
                              % (hours, minutes, seconds)).split(u'.'))
        return (unicode(self.index) + u'\n'
                + delta_to_string(self.start)
                + ' --> '
                + delta_to_string(self.end) + u'\n'
                + u''.join(self.lines))


srt_file = open("foo.srt")
entries = []
entry = []
for line in srt_file:
    if options.decode:  # `options` comes from an option parser elsewhere in the original script
        line = line.decode(options.decode)
    if line == u'\n':
        entries.append(Srt_entry(entry))
        entry = []
    else:
        entry.append(line)
srt_file.close()
splits = [s.strip() for s in re.split(r'\n\s*\n', text) if s.strip()]
regex = re.compile(r'''(?P<index>\d+).*?(?P<start>\d{2}:\d{2}:\d{2},\d{3}) --> (?P<end>\d{2}:\d{2}:\d{2},\d{3})\s*.*?\s*(?P<text>.*)''', re.DOTALL)
for s in splits:
    r = regex.search(s)
    print r.groups()
Here's a snippet I wrote which converts SRT files into dictionaries:
import re

def srt_time_to_seconds(time):
    split_time = time.split(',')
    major, minor = (split_time[0].split(':'), split_time[1])
    # hours*3600 + minutes*60 + seconds + milliseconds
    return int(major[0])*3600 + int(major[1])*60 + int(major[2]) + float(minor)/1000

def srt_to_dict(srtText):
    subs = []
    for s in re.sub('\r\n', '\n', srtText).split('\n\n'):
        st = s.split('\n')
        if len(st) >= 3:
            split = st[1].split(' --> ')
            subs.append({'start': srt_time_to_seconds(split[0].strip()),
                         'end': srt_time_to_seconds(split[1].strip()),
                         'text': '<br />'.join(j for j in st[2:len(st)])
                         })
    return subs
Usage:
from srt_to_dict import srt_to_dict  # assuming the snippet above is saved as srt_to_dict.py

with open('test.srt', "r") as f:
    srtText = f.read()
print srt_to_dict(srtText)

Python: File formatting

I have a for loop which references a dictionary and prints out the value associated with the key. Code is below:

for i in data:
    if i in dict:
        print dict[i],

How would I format the output so that a new line starts every 60 characters, with the character count along the side? For example:
0001 MRQLLLISDLDNTWVGDQQALEHLQEYLGDRRGNFYLAYATGRSYHSARELQKQVGLMEP
0061 DYWLTAVGSEIYHPEGLDQHWADYLSEHWQRDILQAIADGFEALKPQSPLEQNPWKISYH
0121 LDPQACPTVIDQLTEMLKETGIPVQVIFSSGKDVDLLPQRSNKGNATQYLQQHLAMEPSQ
It's a finicky formatting problem, but I think the following code:
import sys

class EveryN(object):
    def __init__(self, n, outs):
        self.n = n            # chars/line
        self.outs = outs      # output stream
        self.numo = 1         # next tag to write
        self.tll = 0          # tot chars on this line

    def write(self, s):
        while True:
            if self.tll == 0:  # start of line: emit tag
                self.outs.write('%4.4d ' % self.numo)
                self.numo += self.n
            # write up to N chars/line, no more
            numw = min(len(s), self.n - self.tll)
            self.outs.write(s[:numw])
            self.tll += numw
            if self.tll >= self.n:
                self.tll = 0
                self.outs.write('\n')
            s = s[numw:]
            if not s: break

if __name__ == '__main__':
    sys.stdout = EveryN(60, sys.stdout)
    for i, a in enumerate('abcdefgh'):
        print a*(5 + i*5),
shows how to do it -- the output when running for demonstration purposes as the main script (five a's, ten b's, etc, with spaces in-between) is:
0001 aaaaa bbbbbbbbbb ccccccccccccccc dddddddddddddddddddd eeeeee
0061 eeeeeeeeeeeeeeeeeee ffffffffffffffffffffffffffffff ggggggggg
0121 gggggggggggggggggggggggggg hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh
0181 hhhhhhh
# test data
data = range(10)
the_dict = dict((i, str(i)*200) for i in range(10))

# your loops as a generator
lines = (the_dict[i] for i in data if i in the_dict)

def format(line):
    def splitter():
        k = 0
        while True:
            r = line[k:k+60]  # take a 60 char block
            if r:             # if there are any chars left
                yield "%04d %s" % (k+1, r)  # format them
            else:
                break
            k += 60
    return '\n'.join(splitter())  # join all the numbered blocks

for line in lines:
    print format(line)
I haven't tested it on actual data, but I believe the code below would do the job. It first builds up the whole string, then outputs it a 60-character line at a time. It uses the three-argument version of range() to count by 60.
s = ''.join(dict[i] for i in data if i in dict)
for i in range(0, len(s), 60):
    print '%04d %s' % (i+1, s[i:i+60])
It seems like you're looking for textwrap:
The textwrap module provides two convenience functions, wrap() and
fill(), as well as TextWrapper, the class that does all the work, and
a utility function dedent(). If you’re just wrapping or filling one or
two text strings, the convenience functions should be good enough;
otherwise, you should use an instance of TextWrapper for efficiency.
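textwrap handles the wrapping, but the counters still have to be added by hand; a sketch (reusing the_dict and data from the answer above, and assuming the joined values contain no whitespace):

import textwrap

s = ''.join(the_dict[i] for i in data if i in the_dict)
for offset, chunk in zip(range(1, len(s) + 1, 60), textwrap.wrap(s, 60)):
    print('%04d %s' % (offset, chunk))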
