This is my code; it tries to convert the second field of each line from exponential notation into a float.
outputrrd = processrrd.communicate()
(output, error) = outputrrd
output_lines = output.split('\n')
for line in output_lines:
    m = re.search(r"(.*): ", line)
    if m != None:
        felder = line.split(': ')
        epoch = felder[0].strip(':')
        utc = epoch2normal(epoch).strip("\n")
        #print felder[1]
        data = float(felder[1])
        float_data = data * 10000000
        print float_data
        resultslist.append( utc + ' ' + hostname + ' ' + float_data)
But the program stops with this error:
File "/opt/omd/scripts/python/livestatus/rrdfetch-convert.py", line 156, in <module>
data = float(felder[1])
ValueError: invalid literal for float(): 6,0865000000e-01
Does anyone know the reason?
The easy way is to use replace(). A simple example:
>>> value = str('6,0865000000e-01')
>>> value2 = value.replace(',', '.')
>>> float(value2)
0.60865000000000002
The reason is the comma in 6,0865000000e-01: float() is not locale-aware and does not accept a comma as the decimal separator. See PEP 331 for details.
Try locale.atof(), or replace the comma with a dot.
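A minimal sketch of both approaches (the locale name here is an assumption; it has to be installed on your system):

import locale

value = '6,0865000000e-01'

# Option 1: plain string replacement, no locale needed.
print(float(value.replace(',', '.')))       # 0.60865

# Option 2: locale-aware parsing, assuming a German-style locale is available.
locale.setlocale(locale.LC_NUMERIC, 'de_DE.UTF-8')
print(locale.atof(value))                   # 0.60865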
The float value itself is correct; just use format() to display it the way you want, e.g.:
print(format(the_float, '.8f'))
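For instance, with the question's value (comma already replaced by a dot):

the_float = 0.60865
print(format(the_float, '.8f'))   # 0.60865000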
I think this will be useful to you:

import decimal

def remove_exponent(value):
    """
    >>> remove_exponent(decimal.Decimal('5E+3'))
    '5000.00000000'
    """
    decimal_places = 8
    max_digits = 16

    if isinstance(value, decimal.Decimal):
        context = decimal.getcontext().copy()
        context.prec = max_digits
        return "{0:f}".format(value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
    else:
        return "%.*f" % (decimal_places, value)
Simply cast the string to a float:
new_val = float('9.81E7')
This works for me; try it out.
def remove_exponent(value):
    parts = value.split('e')   # mantissa and exponent, e.g. '6.0865e-01' -> ['6.0865', '-01']
    ret_val = format((float(parts[0])) * (10 ** int(parts[1])), '.8f')
    return ret_val
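For example, a quick check with the question's value (after replacing the comma with a dot):

print(remove_exponent('6.0865000000e-01'))   # 0.60865000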
I had a similar issue converting a string in scientific/exponential notation to a float (the resulting float can itself display in exponential notation if it is small or large enough):
num = '-8e-05'

def scientific_to_float(exponential):
    e_index = exponential.index('e')
    base = float(exponential[:e_index])
    exponent = float(exponential[e_index + 1:])
    float_number = base * (10 ** exponent)
    return float_number

scientific_to_float(num)  # returns the float -8e-05
Related
Is there a (general) way to do locale-aware string formatting in Python using the .format() {:} syntax? I know of locale.format_string(), but this only accepts the old % syntax. {:n} exists, but only works as a replacement for {:d}, not for the other formats.
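For reference, the %-style call looks like this (a minimal sketch; the locale name is an assumption and has to be installed on your system):

import locale

locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')                 # assumed locale; adjust for your system
print(locale.format_string('%.2f', 1234.56, grouping=True))    # 1.234,56 in a German locale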
My current approach is below, which I expect will break for most non-trivial cases.
import locale
import string

class LocaleFormatter(string.Formatter):
    def format_field(self, value, format_spec):
        if format_spec[-1] not in 'eEfFgGdiouxXcrs':  # types copied from locale._percent_re
            return super().format_field(value, format_spec)
        grouping = ',' in format_spec or '_' in format_spec
        format_spec = '%' + format_spec.replace(',', '').replace('_', '')
        return locale.format_string(format_spec, value, grouping)

locale.setlocale(locale.LC_ALL, '')
fmt = LocaleFormatter()
fmt.format("Length: {:,.2f} mm, width: {:,.2f} mm", 1234.56, 7890)  # expected format is 1.234,56 for most locales
You can achieve what you want by converting the float to a Decimal, setting the precision, and manually adding leading spaces where needed:
import decimal
import locale
import re
import string

class LocaleFormatter(string.Formatter):
    def format_field(self, value, format_spec):
        if format_spec[-1] not in 'eEfFgGdiouxXcrs':  # types copied from locale._percent_re
            return super().format_field(value, format_spec)
        grouping = ',' in format_spec or '_' in format_spec
        prec_re = re.match(r',?(?P<spec>(?P<width>\d+)?(\.(?P<precision>\d+))?)?[eEfFgGdiouxXcrs]', format_spec)
        if prec_re is not None and prec_re.group('spec') is not None:
            space_len = prec_re.group('width')
            after_dot = prec_re.group('precision')
            if after_dot is not None:
                pre_dot_value_len = len(str(int(value)))
                ctx = decimal.Context(prec=int(after_dot) + pre_dot_value_len)
                # prec turned out to be the length of the decimal repr, not precision
                value = ctx.create_decimal(value)
            if space_len is not None:
                after_dot = 0 if after_dot is None else int(after_dot)
                pre_dot = len(str(value))
                how_many = pre_dot - after_dot - 1  # -1 for the dot character
                if how_many > 0:
                    format_spec = how_many * ' ' + format_spec
        format_spec = '%' + format_spec.replace(',', '').replace('_', '')
        return locale.format_string(format_spec, value, grouping)

locale.setlocale(locale.LC_ALL, 'DE-DE')  # Windows-style locale name; on Linux use something like 'de_DE.UTF-8'
fmt = LocaleFormatter()
res = fmt.format("Length: {:,.2f} mm, width: {:,2f} mm", 1234.567878, 7890)  # expected format is 1.234,56 for most locales
print(res)
Which results in:
Length: 1.234,57 mm, width: 7.890,000000 mm
Please note that the value you suggested as the correct one is not properly rounded after formatting; the one above is.
I am working on code that should take a floating point value and convert it into the following tuple format:
(+ or - sign, significand as a 54-character string: the leading bit, a dot, and 52 fraction bits, exponent).
For one of my tests, v = 6.2831853072, I am getting a slightly wrong answer, but this code passes all other tests. I should also note that I am new to Python, so my code is not the most efficient.
What I should be getting as a correct response:
('+', '1.1001001000011111101101010100010001001000011011100000', 2)
What I am actually generating which is wrong:
('+', '1.0000001000011111101101010100010001001000011011100000', 2)
Any perspective would be appreciated. Of course, any code optimization recommendations are welcome too.
Code:
v = 6.2831853072
vhex = v.hex()
# print(v.hex())

if v == 0.0:
    s_sign = '+'
    v_exp = 0
    fp = '0.0000000000000000000000000000000000000000000000000000'
elif str(vhex[0]) == '-':
    s_sign = '-'
    signand = vhex.split('p')
    signand = signand[0][3:]
    # print(signand[2:])
    v_exp = vhex.split('p')
    v_exp = int((v_exp[1]))
    integer = int(signand[2:], 16)
    fp = format(integer, '0>52b')
    fp = vhex[3:5] + fp
else:
    s_sign = '+'
    signand = vhex.split('p')
    signand = signand[0][3:]
    print(signand[0][3:])
    v_exp = vhex.split('p')
    v_exp = int((v_exp[1]))
    integer = int(signand[2:], 16)
    fp = format(integer, '0>52b')
    fp = vhex[2:4] + fp
    print(integer)
    print(vhex)

tt = (s_sign, fp, v_exp)
tt
I can see the issue: it is in the positive branch, at
integer = int(signand[2:], 16)
The [2:] slice skips not only the leading dot but also the first hex digit of the significand; it should be signand[1:].
Here is an implementation that avoids index slicing altogether; it can handle 0.0 as well:
def floating_point_bin(v):
    vhex = v.hex()
    vhex_parts = vhex.split('0x')
    signand, v_exp = vhex_parts[-1].split('p')
    sign, precision = signand.split('.')
    signand_int = int(precision, 16)
    fp = format(signand_int, '0>52b')
    s_signif = sign + '.' + fp
    v_exp = int(v_exp)
    if vhex_parts[0] == '':
        s_sign = '+'
    else:
        s_sign = '-'
    return s_sign, s_signif, v_exp
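A quick check with the test value from the question, which should produce the expected tuple shown above:

print(floating_point_bin(6.2831853072))
# ('+', '1.1001001000011111101101010100010001001000011011100000', 2)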
I found a Python script that I'm trying to convert to Lua. I believe I have it just about converted, but the code isn't quite working properly, so I need assistance, as I do not know Python at all and can only guess at its intentions. It is simply a color converter from RGB to xterm-256. The table is quite huge, so I've truncated it for ease of reading.
Python code:
import sys, re
CLUT = [  # color look-up table
    # 8-bit, RGB hex

    # Primary 3-bit (8 colors). Unique representation!
    ('00', '000000'),
    ('01', '800000'),
    ('02', '008000'),
    ('03', '808000'),
    ('04', '000080'),
    ('05', '800080'),
    ('06', '008080'),
    ('07', 'c0c0c0'),
]

def _str2hex(hexstr):
    return int(hexstr, 16)

def _strip_hash(rgb):
    # Strip leading `#` if exists.
    if rgb.startswith('#'):
        rgb = rgb.lstrip('#')
    return rgb

def _create_dicts():
    short2rgb_dict = dict(CLUT)
    rgb2short_dict = {}
    for k, v in short2rgb_dict.items():
        rgb2short_dict[v] = k
    return rgb2short_dict, short2rgb_dict

def short2rgb(short):
    return SHORT2RGB_DICT[short]

def print_all():
    """ Print all 256 xterm color codes.
    """
    for short, rgb in CLUT:
        sys.stdout.write('\033[48;5;%sm%s:%s' % (short, short, rgb))
        sys.stdout.write("\033[0m ")
        sys.stdout.write('\033[38;5;%sm%s:%s' % (short, short, rgb))
        sys.stdout.write("\033[0m\n")
    print "Printed all codes."
    print "You can translate a hex or 0-255 code by providing an argument."

def rgb2short(rgb):
    """ Find the closest xterm-256 approximation to the given RGB value.
    #param rgb: Hex code representing an RGB value, eg, 'abcdef'
    #returns: String between 0 and 255, compatible with xterm.
    >>> rgb2short('123456')
    ('23', '005f5f')
    >>> rgb2short('ffffff')
    ('231', 'ffffff')
    >>> rgb2short('0DADD6') # vimeo logo
    ('38', '00afd7')
    """
    rgb = _strip_hash(rgb)
    incs = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
    # Break 6-char RGB code into 3 integer vals.
    parts = [ int(h, 16) for h in re.split(r'(..)(..)(..)', rgb)[1:4] ]
    res = []
    for part in parts:
        i = 0
        while i < len(incs)-1:
            s, b = incs[i], incs[i+1]  # smaller, bigger
            if s <= part <= b:
                s1 = abs(s - part)
                b1 = abs(b - part)
                if s1 < b1: closest = s
                else: closest = b
                res.append(closest)
                break
            i += 1
    #print '***', res
    res = ''.join([ ('%02.x' % i) for i in res ])
    equiv = RGB2SHORT_DICT[ res ]
    #print '***', res, equiv
    return equiv, res

RGB2SHORT_DICT, SHORT2RGB_DICT = _create_dicts()

#---------------------------------------------------------------------
if __name__ == '__main__':
    import doctest
    doctest.testmod()
    if len(sys.argv) == 1:
        print_all()
        raise SystemExit
    arg = sys.argv[1]
    if len(arg) < 4 and int(arg) < 256:
        rgb = short2rgb(arg)
        sys.stdout.write('xterm color \033[38;5;%sm%s\033[0m -> RGB exact \033[38;5;%sm%s\033[0m' % (arg, arg, arg, rgb))
        sys.stdout.write("\033[0m\n")
    else:
        short, rgb = rgb2short(arg)
        sys.stdout.write('RGB %s -> xterm color approx \033[38;5;%sm%s (%s)' % (arg, short, short, rgb))
        sys.stdout.write("\033[0m\n")
And my nearly complete translated Lua code:
CLUT = {
    -- Primary 3-bit (8 colors). Unique representation!
    ['00'] = '000000',
    ['01'] = '800000',
    ['02'] = '008000',
    ['03'] = '808000',
    ['04'] = '000080',
    ['05'] = '800080',
    ['06'] = '008080',
    ['07'] = 'c0c0c0',
}

function _str2hex(hexstr)
    return tonumber(hexstr, 16)
end

function _strip_hash(rgb)
    -- Strip leading # if exists
    return rgb:gsub("^#", "")
end

function _create_dicts()
    short2rgb_dict = CLUT
    rgb2short_dict = {}
    for k, v in pairs(short2rgb_dict) do
        rgb2short_dict[v] = k
    end
    return rgb2short_dict, short2rgb_dict
end

function short2rgb(short)
    return short2rgb_dict[short]
end

function rgb2short(rgb)
    -- Find closest xterm-256 approximation to the given RGB value
    _create_dicts()
    rgb = _strip_hash(rgb)
    local res = ""
    local equiv = ""
    local incs = {"0x00", "0x5f", "0x87", "0xaf", "0xd7", "0xff"}
    for part in string.gmatch(rgb, "(..)") do
        part = tonumber(part, 16)
        i = 1
        while i < #incs - 1 do
            s, b = tonumber(incs[i]), tonumber(incs[i+1])
            if s <= part and part <= b then
                s1 = math.abs(s - part)
                b1 = math.abs(b - part)
            end
            if s1 < b1 then
                closest = s
            else
                closest = b
                res = res .. closest
                break
            end
            i = i + 1
        end
    end
    equiv = rgb2short_dict[res]
    return equiv, res
end
I realize that I'm missing the printing portion of the code, but I wasn't sure whether that was relevant, and I know some of the code I've translated is not correct, as the script would otherwise be working. The failures I get are in the rgb2short function, which does not return the proper equiv and res values. How far off am I with my revision, and what changes do I need to make it work?
I wound up figuring it out on my own after some hardcore trial and error. The function rgb2short should have been:
function rgb2short(rgb)
    -- Find closest xterm-256 approximation to the given RGB value
    _create_dicts()
    rgb = _strip_hash(rgb)
    local res = ""
    local equiv = ""
    local incs = {"0x00", "0x5f", "0x87", "0xaf", "0xd7", "0xff"}
    for part in string.gmatch(rgb, "(..)") do
        part = tonumber(part, 16)
        i = 1
        -- i runs from 1 to #incs - 1, so every adjacent pair (including the last) is checked
        while i < #incs do
            s, b = tonumber(incs[i]), tonumber(incs[i+1])
            if s <= part and part <= b then
                s1 = math.abs(s - part)
                b1 = math.abs(b - part)
                if s1 < b1 then
                    closest = s
                else
                    closest = b
                end
                res = res .. string.format("%02x", closest)
                break
            end
            i = i + 1
        end
    end
    equiv = rgb2short_dict[res]
    return equiv, res
end
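A quick sanity check: with the truncated table above only the eight primary colors are present, so most inputs map to an RGB key that has no entry, but a value near black still resolves:

print(rgb2short("121212"))   -- prints: 00    000000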
I can't figure out, even with debugging, why I get a b in front of my hidden string. I get this string in my result:
'1101000011001010110110001101100011'
def retr(filename):
    img = Image.open(filename)
    binary = ''
    if img.mode in ('RGBA'):
        img = img.convert('RGBA')
        datas = img.getdata()
        for item in datas:
            digit = decode(rgb2hex(item[0], item[1], item[2]))
            if digit == None:
                pass
            else:
                binary = binary + digit
                if (binary[-16:] == '1111111111111110'):
                    # print("Success")
                    return bin2str(binary[:-16])
        return str(bin2str(binary))
    return "Incorrect Image Mode, Couldn't Retrieve"
But the result in the console is: b'hello'. Where does the b come from?
These helper functions are used before retr():
import binascii

def rgb2hex(r, g, b):
    return '#{:02x}{:02x}{:02x}'.format(r, g, b)

def hex2rgb(hexcode):
    return int(hexcode[1:3], 16), int(hexcode[3:5], 16), int(hexcode[5:7], 16)

def str2bin(message):
    binary = bin(int(binascii.hexlify(message.encode("ascii")), 16))
    return binary[2:]

def bin2str(binary):
    message = binascii.unhexlify('%x' % (int('0b' + binary, 2)))
    return message
Please help me get rid of that b.
>>> x = b'hello'
>>> print(x)
b'hello'
>>> print(x.decode('utf-8'))
hello
I hope this shows enough for you to understand how to get it back to a UTF-8 string.
bin2str is returning a bytes object, which is what prints with the b prefix. You can use .decode() to return a str instead.
def bin2str(binary):
    message = binascii.unhexlify('%x' % (int('0b' + binary, 2)))
    return message.decode("utf-8")  # or encoding of choice
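A quick check (the bit string below is what str2bin('hello') produces):

print(bin2str('110100001100101011011000110110001101111'))   # hello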
Any bytes object will display with a b' prefix before the string to indicate that it is a bytes value rather than text. After you convert the binary value to a string with str(), you can strip that prefix with replace() (the calls have to be chained, otherwise the first replacement is discarded):
newstring = str(message).replace("b'", "").replace("'", "")
I'm a beginner, so sorry if this is obvious.
I'm at a loss here. I've been trying to make an encryption/decryption program, but I keep getting this error. I'm aware that there are other questions on this issue, but I still can't resolve it.
Encryptor:
import binascii

def text_to_bits(text, encoding='utf-8', errors='surrogatepass'):
    bits = bin(int(binascii.hexlify(text.encode(encoding, errors)), 16))[2:]
    return bits.zfill(8 * ((len(bits) + 7) // 8))

def text_from_bits(bits, encoding='utf-8', errors='surrogatepass'):
    n = int(bits, 2)
    return int2bytes(n).decode(encoding, errors)

def int2bytes(i):
    hex_string = '%x' % i
    n = len(hex_string)
    return binascii.unhexlify(hex_string.zfill(n + (n & 1)))

#ENCRYPTION ALGORITHM
algorithm = 61913299

#ASCII ----> NUMBERS
raw = input("Enter text to encrypt:")
binary = text_to_bits(raw)
binary = int(binary)
algorithm = int(algorithm)
encrypted = binary * algorithm
encrypted = str(encrypted)
print(encrypted)
print("Done")
Decryptor:
import sys
import time

def to_bin(string):
    res = ''
    for char in string:
        tmp = bin(ord(char))[2:]
        tmp = '%08d' % int(tmp)
        res += tmp
    return res

def to_str(string):
    res = ''
    for idx in range(len(string)/8):
        tmp = chr(int(string[idx*8:(idx+1)*8], 2))
        res += tmp
    return res

incorrectpasswords = 0
password = ("password")
originpassword = password
x = 1
algorithm = 61913299

while x == 1:
    passwordattempt = input("Enter Password:")
    if passwordattempt == password:
        print("Correct")
        x = 2
    if passwordattempt != password:
        print("Incorrect")
        incorrectpasswords = incorrectpasswords + 1
        if incorrectpasswords > 2:
            if x == 1:
                print("Too many wrong attempts, please try again in one minute.")
                time.sleep(60)

encrypted = input("Enter numbers to unencrypt:")
encrypted = int(encrypted)
one = encrypted / algorithm
size = sys.getsizeof(one)
one = str(one).zfill(size + 1)
one = int(one)
unencrypted = to_str(one)
x = unencrypted
For the conversion between binary and text, and text and binary, I used some code I found online.
I believe your code is not working because
one = encrypted / algorithm
generates a float.
To turn your string back into a number you should apply
eval(one)
or
float(one)
instead of
int(one)
(You can also turn the result into an int after applying float or eval.)
Alternatively, you might be able to do it by using integer division // instead of /, which will make one an int by flooring the decimal result of the division, but I'm not sure if that is the behavior you are looking for.
Example in the Python 3 shell:
>>> import sys
>>> one = 15/25
>>> size = sys.getsizeof(one)
>>> one = str(one).zfill(size+1)
>>> one
'00000000000000000000000.6'
>>> type(one)
<class 'str'>
>>> one = eval(one)
>>> one
0.6
>>> type(one)
<class 'float'>
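For comparison, the // route keeps one as an int, provided the encrypted value is an exact multiple of the key (a minimal sketch):

>>> algorithm = 61913299
>>> encrypted = 12345 * algorithm
>>> encrypted // algorithm
12345
>>> type(encrypted // algorithm)
<class 'int'>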