C CRC calculation method rewritten in Python seems wrong

Note: the CRC16 in Python topic does not solve my problem. Deploying Python packages to the server is restricted, and I am not looking for a library to use; I have already written a function and just need it corrected.
I need to implement a CRC calculator in our project to check the consistency of incoming data. There is an example C snippet in the device documentation. I tried to rewrite it in Python, but I cannot get the right result no matter what. For instance:
This is the data to be CRC calculated: 1f120f0c110e2103cc041f8ab002ea38040015440000000000000000083f
The calculated CRC value should be 9911.
The original C snippet:
static const U16 crctab16 [] =
{
0X0000, 0X1189, 0X2312, 0X329B, 0X4624, 0X57AD, 0X6536, 0X74BF,
0X8C48, 0X9DC1, 0XAF5A, 0XBED3, 0XCA6C, 0XDBE5, 0XE97E, 0XF8F7,
0X1081, 0X0108, 0X3393, 0X221A, 0X56A5, 0X472C, 0X75B7, 0X643E,
0X9CC9, 0X8D40, 0XBFDB, 0XAE52, 0XDAED, 0XCB64, 0XF9FF, 0XE876,
0X2102, 0X308B, 0X0210, 0X1399, 0X6726, 0X76AF, 0X4434, 0X55BD,
0XAD4A, 0XBCC3, 0X8E58, 0X9FD1, 0XEB6E, 0XFAE7, 0XC87C, 0XD9F5,
0X3183, 0X200A, 0X1291, 0X0318, 0X77A7, 0X662E, 0X54B5, 0X453C,
0XBDCB, 0XAC42, 0X9ED9, 0X8F50, 0XFBEF, 0XEA66, 0XD8FD, 0XC974,
0X4204, 0X538D, 0X6116, 0X709F, 0X0420, 0X15A9, 0X2732, 0X36BB,
0XCE4C, 0XDFC5, 0XED5E, 0XFCD7, 0X8868, 0X99E1, 0XAB7A, 0XBAF3,
0X5285, 0X430C, 0X7197, 0X601E, 0X14A1, 0X0528, 0X37B3, 0X263A,
0XDECD, 0XCF44, 0XFDDF, 0XEC56, 0X98E9, 0X8960, 0XBBFB, 0XAA72,
0X6306, 0X728F, 0X4014, 0X519D, 0X2522, 0X34AB, 0X0630, 0X17B9,
0XEF4E, 0XFEC7, 0XCC5C, 0XDDD5, 0XA96A, 0XB8E3, 0X8A78, 0X9BF1,
0X7387, 0X620E, 0X5095, 0X411C, 0X35A3, 0X242A, 0X16B1, 0X0738,
0XFFCF, 0XEE46, 0XDCDD, 0XCD54, 0XB9EB, 0XA862, 0X9AF9, 0X8B70,
0X8408, 0X9581, 0XA71A, 0XB693, 0XC22C, 0XD3A5, 0XE13E, 0XF0B7,
0X0840, 0X19C9, 0X2B52, 0X3ADB, 0X4E64, 0X5FED, 0X6D76, 0X7CFF,
0X9489, 0X8500, 0XB79B, 0XA612, 0XD2AD, 0XC324, 0XF1BF, 0XE036,
0X18C1, 0X0948, 0X3BD3, 0X2A5A, 0X5EE5, 0X4F6C, 0X7DF7, 0X6C7E,
0XA50A, 0XB483, 0X8618, 0X9791, 0XE32E, 0XF2A7, 0XC03C, 0XD1B5,
0X2942, 0X38CB, 0X0A50, 0X1BD9, 0X6F66, 0X7EEF, 0X4C74, 0X5DFD,
0XB58B, 0XA402, 0X9699, 0X8710, 0XF3AF, 0XE226, 0XD0BD, 0XC134,
0X39C3, 0X284A, 0X1AD1, 0X0B58, 0X7FE7, 0X6E6E, 0X5CF5, 0X4D7C,
0XC60C, 0XD785, 0XE51E, 0XF497, 0X8028, 0X91A1, 0XA33A, 0XB2B3,
0X4A44, 0X5BCD, 0X6956, 0X78DF, 0X0C60, 0X1DE9, 0X2F72, 0X3EFB,
0XD68D, 0XC704, 0XF59F, 0XE416, 0X90A9, 0X8120, 0XB3BB, 0XA232,
0X5AC5, 0X4B4C, 0X79D7, 0X685E, 0X1CE1, 0X0D68, 0X3FF3, 0X2E7A,
0XE70E, 0XF687, 0XC41C, 0XD595, 0XA12A, 0XB0A3, 0X8238, 0X93B1,
0X6B46, 0X7ACF, 0X4854, 0X59DD, 0X2D62, 0X3CEB, 0X0E70, 0X1FF9,
0XF78F, 0XE606, 0XD49D, 0XC514, 0XB1AB, 0XA022, 0X92B9, 0X8330,
0X7BC7, 0X6A4E, 0X58D5, 0X495C, 0X3DE3, 0X2C6A, 0X1EF1, 0X0F78,
};
// calculate 16 bits CRC of the given length data.
U16 GetCrc16(const U8* pData, int nLength)
{
    U16 fcs = 0xffff; // Initialize
    while (nLength > 0)
    {
        fcs = (fcs >> 8) ^ crctab16[(fcs ^ *pData) & 0xff];
        nLength--;
        pData++;
    }
    return ~fcs; // Negate
}
// Check whether the 16-bit CRC of the given length data is right.
BOOL IsCrc16Good(const U8* pData, int nLength)
{
    U16 fcs = 0xffff; // Initialize
    while (nLength > 0)
    {
        fcs = (fcs >> 8) ^ crctab16[(fcs ^ *pData) & 0xff];
        nLength--;
        pData++;
    }
    return (fcs == 0xf0b8); // 0xf0b8 is the CRC-ITU "magic value"
}
But my rewritten Python code below computes the CRC value as -26351:
CRC16_TABLE = [
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78,
]
def check(data):
    _str = ''
    i = 0
    for i in range(len(data) / 2):
        _str = _str + data[2 * i:2 * i + 2] + ','
    return _str[0:-1]

def compute(data):
    crc = 0xffff
    for byte in check(data).split(","):
        crc = ((crc >> 8) ^ CRC16_TABLE[(crc ^ (int(byte, 16) & 0xff)) & 0xff])
    return ~crc
Does anybody have an idea?

The problem is the return statement in compute. You want this to be a 16-bit value. You could mask the result, or use the following:
return 0xffff - crc
And, as has been noted, format the result as a hex number if you want it to appear as 9911. The decimal value is 39185.
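For example (a minimal sketch that reuses CRC16_TABLE from the question and skips the check() splitter), masking the negated CRC to 16 bits, as the C code's U16 return type does implicitly, produces the expected value:

def compute(data):
    crc = 0xffff
    for i in range(0, len(data), 2):
        byte = int(data[i:i + 2], 16)          # take the hex digits two at a time
        crc = (crc >> 8) ^ CRC16_TABLE[(crc ^ byte) & 0xff]
    return ~crc & 0xffff                        # keep only the low 16 bits

print('%04x' % compute("1f120f0c110e2103cc041f8ab002ea38040015440000000000000000083f"))  # prints 9911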

-26351 is a (signed) integer written in decimal notation, while 9911 is in hexadecimal notation. Your function is actually correct:
>>> import struct
>>> struct.pack('>h', -26351)
'\x99\x11'
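Equivalently, masking the signed result to 16 bits and formatting it as hex shows the same thing (a quick illustration):
>>> -26351 & 0xffff
39185
>>> '%04x' % (-26351 & 0xffff)
'9911'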

Here is a Python plugin for CRC calculation; it is written in C:
https://github.com/giraysam/crc-for-python
In your Python script:
import crc
print crc.getcrc16("1f120f0c110e2103cc041f8ab002ea38040015440000000000000000083f")  # output "9911"

I have written the following code for calculating CRC8:
acklist = []  # a list of your byte string data (hex strings such as '1f')
x = 0xff
crc = 0
for i in range(len(acklist)):
    crc += int(acklist[i], 16) & x   # sum all byte values
print(crc)
crc = ~crc    # two's-complement negate the sum
crc += 1
crc1 = crc >> 8 & x   # high byte
crc2 = crc & x        # low byte

Related

ZIP's CRC-32 for encryption isn't quite zlib's crc32... why?

I'm writing my own unzip code, and (from trial and error, no understanding) it looks like the CRC-32 algorithm on the one byte that decryption requires doesn't quite match up with zlib's. To convert from one to the other:
def crc32(ch, crc):
    crc = zlib.crc32(bytes([~ch & 0xFF]), crc)
    return (~crc & 0xFF000000) + (crc & 0x00FFFFFF)
Why is this? (/ Am I wrong?)
Edit: the reason I think there is at least a possibility of me being right: at https://github.com/uktrade/stream-unzip/blob/d23400028abbe3b0d7e1951cb562cd0541bfc960/stream_unzip.py#L89 I use the above successfully to decrypt encrypted ZIP files:
def decrypt(chunks):
    key_0 = 305419896
    key_1 = 591751049
    key_2 = 878082192

    def crc32(ch, crc):
        crc = zlib.crc32(bytes([~ch & 0xFF]), crc)
        return (~crc & 0xFF000000) + (crc & 0x00FFFFFF)

    def update_keys(byte):
        nonlocal key_0, key_1, key_2
        key_0 = crc32(byte, key_0)
        key_1 = (key_1 + (key_0 & 0xFF)) & 0xFFFFFFFF
        key_1 = ((key_1 * 134775813) + 1) & 0xFFFFFFFF
        key_2 = crc32(key_1 >> 24, key_2)

    def decrypt(chunk):
        chunk = bytearray(chunk)
        for i, byte in enumerate(chunk):
            temp = key_2 | 2
            byte ^= ((temp * (temp ^ 1)) >> 8) & 0xFF
            update_keys(byte)
            chunk[i] = byte
        return chunk

    yield_all, _, get_num, _ = get_byte_readers(chunks)

    for byte in password:
        update_keys(byte)
    if decrypt(get_num(12))[11] != mod_time >> 8:
        raise ValueError('Incorrect password')

    for chunk in yield_all():
        yield decrypt(chunk)
However, if I replace the crc32 function above with a plain call to zlib's, it doesn't work (e.g. it complains about an incorrect password).
OK, you're not completely wrong. It is indeed the same CRC-32 algorithm, but without the pre- and post-processing (inverting the CRC coming in and going out). It is truly odd code that is trying to replicate that with the zlib.crc32 function. All you need is this:
def crc32(ch, crc):
    return ~zlib.crc32(bytes([ch]), ~crc) & 0xffffffff
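A quick sanity check (my own sketch, not part of the original answer) that the simplified wrapper produces the same raw, unprocessed CRC-32 update as the wrapper from the question:

import zlib

def crc32_original(ch, crc):
    # the wrapper from the question
    crc = zlib.crc32(bytes([~ch & 0xFF]), crc)
    return (~crc & 0xFF000000) + (crc & 0x00FFFFFF)

def crc32_simple(ch, crc):
    # the simplified version from the answer
    return ~zlib.crc32(bytes([ch]), ~crc) & 0xffffffff

# compare both over every byte value for a few starting CRC values
for start in (0, 0x12345678, 0xFFFFFFFF):
    assert all(crc32_original(ch, start) == crc32_simple(ch, start) for ch in range(256))
print("both wrappers agree")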

Decrypt Fernet Encrypted Text (Python) in Swift

I have generated an encrypted text in Python using cryptography:
from cryptography.fernet import Fernet
message = "my deep dark secret".encode()
f = Fernet(key)
encrypted = f.encrypt(message)
# decrypting
from cryptography.fernet import Fernet
encrypted = b"...encrypted bytes..."
f = Fernet(key)
decrypted = f.decrypt(encrypted)
ENCRYPTION INFO:
KEY: b'3b-Nqg6ry-jrAuDyVjSwEe8wrdyEPQfPuOQNH1q5olE='
ENC_MESSAGE: b'gAAAAABhBRBGKSwa7AluNJYhwWaHrQGwAA8UpMH8Wtw3tEoTD2E_-nbeoAvxbtBpFiC0ZjbVne_ZetFinKSyMjxwWaPRnXVSVqz5QqpUXp6h-34_TL7BaDs='
Now I'm trying to decrypt it in Swift, but with no luck.
So far I've tried CryptoSwift with the following:
func testdec() {
    let str = "3b-Nqg6ry-jrAuDyVjSwEe8wrdyEPQfPuOQNH1q5olE="
    let ba = "gAAAAABhBRBGKSwa7AluNJYhwWaHrQGwAA8UpMH8Wtw3tEoTD2E_-nbeoAvxbtBpFiC0ZjbVne_ZetFinKSyMjxwWaPRnXVSVqz5QqpUXp6h-34_TL7BaDs="
    let encodedString = Base64FS.decodeString(str: String(str.utf8))
    print(encodedString.count)
    let first4 = String(ba.prefix(25))
    let start = first4.index(first4.startIndex, offsetBy: 9)
    let end = first4.index(first4.endIndex, offsetBy: 0)
    let iv = String(first4[start..<end])
    let starta = ba.index(ba.startIndex, offsetBy: 25)
    let enda = ba.index(ba.endIndex, offsetBy: -32)
    let cipher_text = String(ba[starta..<enda])
    let cipher_text_bt: [UInt8] = [UInt8](base64: cipher_text)
    print(cipher_text)
    print(iv)
    let cipher_text_bta: [UInt8] = [UInt8](base64: ba)
//    print(encodedString.bytes.count)
//    let key_bta: [UInt8] = [UInt8](base64: "RgSADaf8w4v9vokuncyzWRbP5hkdhXSETdxIHLDHtKg=")
//    let iv_bt: [UInt8] = [UInt8](base64: "7KUDrsPmb28KQqOWv00KXw==")
//    let cipher_text_bt: [UInt8] = [UInt8](base64: "gAAAAABhBQ837KUDrsPmb28KQqOWv00KX2KjsP2ar6lHLqIPUKSvF1WHiruquG-tiAEkrCZZbm-lFR9ZwxsqVcXovmQ3Hv6pWw==")
    do {
        print("A")
        let aes = try AES(key: encodedString, blockMode: CBC(iv: iv.bytes), padding: .pkcs7)
        print("B")
        let cipherTexta = try aes.decrypt(cipher_text_bt)
        print(cipherTexta)
    } catch {
        print(error)
    }
}
OUTPUT:
16
WaHrQGwAA8UpMH8Wtw3tEoTD2E_-nbeoAvxbtBpFiC0ZjbVne_ZetFinKSyMjxw
RBGKSwa7AluNJYhw
A
B
invalidData
Any help would be appreciated.
I've managed to get your cipher text decrypted using only Apple-provided sources. If you support iOS 13 and up, I suggest you use CryptoKit to verify the HMAC, but for now, I've adopted a full CommonCrypto solution.
First, a minor extension to create Data from base64 URL strings.
import Foundation
import CommonCrypto

extension Data {
    init?(base64URL base64: String) {
        var base64 = base64
            .replacingOccurrences(of: "-", with: "+")
            .replacingOccurrences(of: "_", with: "/")
        if base64.count % 4 != 0 {
            base64.append(String(repeating: "=", count: 4 - base64.count % 4))
        }
        self.init(base64Encoded: base64)
    }
}
The decrypt function is a bit obscure, but it supports the very old CommonCrypto syntax. withUnsafeBytes syntax would be cleaner, but this is a quick workaround.
func decrypt(ciphertext: Data, key: Data, iv: Data) -> Data {
    var decryptor: CCCryptorRef?
    defer {
        CCCryptorRelease(decryptor)
    }

    var key = Array(key)
    var iv = Array(iv)
    var ciphertext = Array(ciphertext)

    CCCryptorCreate(CCOperation(kCCDecrypt), CCAlgorithm(kCCAlgorithmAES), CCOptions(kCCOptionPKCS7Padding), &key, key.count, &iv, &decryptor)

    var outputBytes = [UInt8](repeating: 0, count: CCCryptorGetOutputLength(decryptor, ciphertext.count, false))
    CCCryptorUpdate(decryptor, &ciphertext, ciphertext.count, &outputBytes, outputBytes.count, nil)

    var movedBytes = 0
    var finalBytes = [UInt8](repeating: 0, count: CCCryptorGetOutputLength(decryptor, 0, true))
    CCCryptorFinal(decryptor, &finalBytes, finalBytes.count, &movedBytes)

    return Data(outputBytes + finalBytes[0 ..< movedBytes])
}
Then the HMAC. I suggest you use CryptoKit if you can. This function is of course fixed to SHA256; there might be ways to make it dynamic, but for Fernet only SHA256 is supported anyway.
func verifyHMAC(_ mac: Data, authenticating data: Data, using key: Data) -> Bool {
    var data = Array(data)
    var key = Array(key)
    var macOut = [UInt8](repeating: 0, count: Int(CC_SHA256_DIGEST_LENGTH))
    CCHmac(CCHmacAlgorithm(kCCHmacAlgSHA256), &key, key.count, &data, data.count, &macOut)
    return Array(mac) == macOut
}
All of that together comes down to the following code. Note that I do not check the version and/or timestamp, which should be done according to the spec.
let fernetKey = Data(base64URL: "3b-Nqg6ry-jrAuDyVjSwEe8wrdyEPQfPuOQNH1q5olE=")!
let signingKey = fernetKey[0 ..< 16]
let cryptoKey = fernetKey[16 ..< fernetKey.count]
let fernetToken = Data(base64URL: "gAAAAABhBRBGKSwa7AluNJYhwWaHrQGwAA8UpMH8Wtw3tEoTD2E_-nbeoAvxbtBpFiC0ZjbVne_ZetFinKSyMjxwWaPRnXVSVqz5QqpUXp6h-34_TL7BaDs=")!
let version = Data([fernetToken[0]])
let timestamp = fernetToken[1 ..< 9]
let iv = fernetToken[9 ..< 25]
let ciphertext = fernetToken[25 ..< fernetToken.count - 32]
let hmac = fernetToken[fernetToken.count - 32 ..< fernetToken.count]
let plainText = decrypt(ciphertext: ciphertext, key: cryptoKey, iv: iv)
print(plainText, String(data: plainText, encoding: .utf8) ?? "Non utf8")
print(verifyHMAC(hmac, authenticating: version + timestamp + iv + ciphertext, using: signingKey))
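For cross-checking the byte layout on the Python side, here is a small stdlib-only sketch (my own addition, not part of the answer) that splits the same token the same way the Swift code does and verifies the HMAC:

import base64, hmac, hashlib

def b64url_decode(s):
    # same padding fix-up as the Data(base64URL:) extension above
    return base64.urlsafe_b64decode(s + "=" * (-len(s) % 4))

key = b64url_decode("3b-Nqg6ry-jrAuDyVjSwEe8wrdyEPQfPuOQNH1q5olE=")
token = b64url_decode("gAAAAABhBRBGKSwa7AluNJYhwWaHrQGwAA8UpMH8Wtw3tEoTD2E_-nbeoAvxbtBpFiC0ZjbVne_ZetFinKSyMjxwWaPRnXVSVqz5QqpUXp6h-34_TL7BaDs=")

signing_key, crypto_key = key[:16], key[16:]       # first half signs, second half encrypts
version, timestamp = token[0:1], token[1:9]
iv, ciphertext, mac = token[9:25], token[25:-32], token[-32:]

expected = hmac.new(signing_key, token[:-32], hashlib.sha256).digest()
print(hmac.compare_digest(mac, expected))           # True for a valid token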

Using ctypes to call a function that takes char ** because it writes both the buffer and the pointer

I'm trying to call iconv(3) from a Python (3) program, using ctypes. The C type signature of iconv is
size_t iconv(iconv_t cd,
             char **inptr, size_t *inbytesleft,
             char **outptr, size_t *outbytesleft);
and you are expected to call it like this:
char *inp = "abcdef";
char outbuf[16];
char *outp = outbuf;
size_t ibytes = strlen(inp);
size_t obytes = sizeof outbuf;
size_t rv = iconv(cd, &inp, &ibytes, &outp, &obytes);
It will write to outbuf, obviously, and it will also modify all four of the variables inp, outp, ibytes, and obytes to indicate how far it got with the conversion before running into a problem (if any). It guarantees not to write to the input string, despite that not being const.
Now, naively, you reflect that in ctypes like this:
iconv = libc.iconv
iconv.restype = ctypes.c_size_t
iconv.argtypes = [ctypes.c_void_p,
                  ctypes.POINTER(ctypes.c_char_p),
                  ctypes.POINTER(ctypes.c_size_t),
                  ctypes.POINTER(ctypes.c_char_p),
                  ctypes.POINTER(ctypes.c_size_t)]
(iconv_t is a typedef for void * in the C library I'm testing on) but when I try to call that, I get errors:
>>> obuf = ctypes.create_string_buffer(16)
>>> obuflen = ctypes.c_size_t(16)
>>> iconv(utf8_to_utf16,
... ctypes.byref(ctypes.c_char_p(b"abcdef")),
... ctypes.byref(ctypes.c_size_t(6)),
... ctypes.byref(obuf),
... ctypes.byref(obuflen))
ArgumentError: argument 4: <class 'TypeError'>: expected LP_c_char_p
instance instead of pointer to c_char_Array_16
Trying to explicitly convert obuf to c_char_p doesn't work either:
>>> optr = ctypes.c_char_p(obuf)
TypeError: bytes or integer address expected instead of c_char_Array_16 instance
These type names it's using in the error messages don't appear in the manual, and I'm pretty well stumped. What's the right way to go about this?
(If you are wondering why I would want to do this instead of using Python's built-in encoding converters, the short version is because Python's converters don't support the same set of encodings as [GNU] iconv, nor do they have the //TRANSLIT feature.)
Below is code I used to find characters that are converted to '"' with //TRANSLIT. Sorry it is not very polished, but I hope it is still usable for somebody. :)
I needed to run it under Python 2.6, so I tried to write it a little more universally. (2.6 has some gotchas, so the code could be nicer and simpler under Python 3.)
from __future__ import print_function
import sys
import ctypes

libc = ctypes.cdll.LoadLibrary("libc.so.6")

if sys.version_info[0] > 2:
    def unichr(a):
        return chr(a)

LP_c_char2 = ctypes.POINTER(ctypes.c_char_p)
LP_c_char = ctypes.POINTER(ctypes.create_string_buffer(16).__class__)

get_errno_loc = libc.__errno_location
get_errno_loc.restype = ctypes.POINTER(ctypes.c_int)

class MyError(OSError):
    def __init__(self, e):
        if sys.version_info[0] <= 2:
            super(MyError, self).__init__(e)
        else:
            super().__init__(e)

def errcheck(ret, func, args):
    if ret == -1 or ret == 2**64 - 1:
        e = get_errno_loc()[0]
        raise MyError(e)
    return ret

iconv_open = libc.iconv_open
iconv_open.restype = ctypes.c_void_p
ret = iconv_open(
    ctypes.c_char_p(b"ISO8859-2//TRANSLIT"),
    ctypes.c_char_p(b"UTF-8"))

iconv = libc.iconv
iconv.errcheck = errcheck
iconv.restype = ctypes.c_size_t

obuf = ctypes.create_string_buffer(16)
obuflen = ctypes.c_size_t(16)
optr = LP_c_char(obuf)
inp = b'\xe2\x80\x9c'

iconv.argtypes = [ctypes.c_void_p, LP_c_char2, ctypes.POINTER(ctypes.c_size_t), ctypes.POINTER(LP_c_char), ctypes.POINTER(ctypes.c_size_t)]
r = iconv(ret, LP_c_char2(ctypes.c_char_p(inp)), ctypes.byref(ctypes.c_size_t(len(inp))), ctypes.byref(optr), ctypes.byref(obuflen))
print(obuf.value)

def func(inp=b"bbb"):
    assert len(inp) < 16, "too big input"
    obuf = ctypes.create_string_buffer(16)
    obuflen = ctypes.c_size_t(16)
    optr = LP_c_char(obuf)
    r = iconv(ret, LP_c_char2(ctypes.c_char_p(inp)), ctypes.byref(ctypes.c_size_t(len(inp))), ctypes.byref(optr), ctypes.byref(obuflen))
    return obuf, obuflen, r, obuf.value[:16 - obuflen.value]

oo, uu, r, vys = func(b'\xe2\x80\x9c\xe2\x80\x9c')
print(oo.raw, uu, r, vys)

for i in range(sys.maxunicode):
    try:
        oo, uu, r, vys = func(unichr(i).encode('utf-8'))
        if vys == b'"':
            print(i, unichr(i))
    except UnicodeEncodeError:
        pass
    except MyError as E:
        pass
        # print("MyError: {E} , {i}".format(E=E, i=i))

Encrypt data using Objective-C and Decrypt in Python

I have the same issue as this question but unfortunately there was no answer on it.
I have the following Objective-C code to encrypt using CCCrypt:
(NSData *)doCrypt:(NSData *)data usingKey:(NSData *)key withInitialVector:(NSData *)iv mode:(int)mode error: (NSError *)error
{
    int buffersize = 0;
    if (data.length % 16 == 0) { buffersize = data.length + 16; }
    else { buffersize = (data.length / 16 + 1) * 16 + 16; }
    // int buffersize = (data.length <= 16) ? 16 : data.length;
    size_t numBytesEncrypted = 0;
    void *buffer = malloc(buffersize * sizeof(uint8_t));
    CCCryptorStatus result = CCCrypt(mode, 0x0, 0x1, [key bytes], [key length], [iv bytes], [data bytes], [data length], buffer, buffersize, &numBytesEncrypted);
    return [NSData dataWithBytesNoCopy:buffer length:numBytesEncrypted freeWhenDone:YES];
}
I use kCCAlgorithmAES128 with kCCOptionPKCS7Padding as options and call the function with [Cryptor doCrypt:data usingKey:key withInitialVector:nil mode:0x0 error:nil];
Now I would like to decrypt it using Python, and to do so I have the following code:
def decrypt(self, data, key):
    iv = '\x00' * 16
    encoder = PKCS7Encoder()
    padded_text = encoder.encode(data)
    mode = AES.MODE_CBC
    cipher = AES.new(key, mode, iv)
    decoded = cipher.decrypt(padded_text)
    return decoded
The PKCS7Encoder looks like this:
class PKCS7Encoder():
    """
    Technique for padding a string as defined in RFC 2315, section 10.3,
    note #2
    """

    class InvalidBlockSizeError(Exception):
        """Raised for invalid block sizes"""
        pass

    def __init__(self, block_size=16):
        if block_size < 2 or block_size > 255:
            raise PKCS7Encoder.InvalidBlockSizeError('The block size must be '
                                                     'between 2 and 255, inclusive')
        self.block_size = block_size

    def encode(self, text):
        text_length = len(text)
        amount_to_pad = self.block_size - (text_length % self.block_size)
        if amount_to_pad == 0:
            amount_to_pad = self.block_size
        pad = chr(amount_to_pad)
        return text + pad * amount_to_pad

    def decode(self, text):
        pad = ord(text[-1])
        return text[:-pad]
Yet whenever I call the decrypt() function, it returns garbage. Am I missing something, or do I have a wrong option enabled somewhere?
Example input and output:
NSData *keyData = [[NSData alloc] initWithRandomData:16];
NSLog(@"key: %@", [keyData hex]);
NSString *str = @"abcdefghijklmno";
NSLog(@"str: %@", str);
NSData *encrypted = [Cryptor encrypt:[str dataUsingEncoding:NSUTF8StringEncoding] usingKey:keyData];
NSLog(@"encrypted str: %@", [encrypted hex]);
Gives:
key: 08b6cb24aaec7d0229312195e43ed829
str: a
encrypted str: 52d61265d22a05efee2c8c0c6cd49e9a
And python:
cryptor = Cryptor()
encrypted_hex_string = "52d61265d22a05efee2c8c0c6cd49e9a"
hex_key = "08b6cb24aaec7d0229312195e43ed829"
print cryptor.decrypt(encrypted_hex_string.decode("hex"), hex_key.decode("hex"))
Result:
láz
Which is weird, but if I dump the hex I get 610f0f0f0f0f0f0f0f0f0f0f0f0f0f0fb02b09fd58cccf04f042e2c90d6ce17a, and 61 = 'a', so I think it is just being displayed wrong.
A bigger input:
key: 08b6cb24aaec7d0229312195e43ed829
str: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
encrypted str: 783fce3eca7ebe60d58b01da3d90105a93bf2d659cfcffc1c2b7f7be7cc0af4016b310551965526ac211f4d6168e3cc5
Result:
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaôNÍ“ƒ˜�Üšw6C%
Here you see that the a's are printed followed by garbage, so I assume this is a padding error or something like that.
The IV is nil on the iOS side and 16 zero bytes on the Python side (see the code).
Your decryption: aes_decrypt(pkcs7_pad(ciphertext))
Correct decryption: pkcs7_unpad(aes_decrypt(ciphertext))
It has to be done this way, because AES in CBC mode expects plaintexts of a multiple of the block size, but you generally want to encrypt arbitrary plaintexts. Therefore, you need to apply the padding before encryption and remove the padding after decryption.
Keep in mind that a - (b % a) cannot be 0 for any (positive) value of a or b. This means that
if amount_to_pad == 0:
amount_to_pad = self.block_size
is unreachable code and can be removed. The good thing is that a - (b % a) already does what you wanted the if block to do.
You should also extend the unpad (decode) function to check that every padding byte is actually the same byte, and that the padding byte is neither zero nor larger than the block size.
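Putting that together, a corrected decrypt (a minimal sketch in the question's Python 2 / PyCrypto style) decrypts first and only then strips and validates the padding:

from Crypto.Cipher import AES

def decrypt(data, key):
    iv = '\x00' * 16                              # matches the nil IV on the iOS side
    cipher = AES.new(key, AES.MODE_CBC, iv)
    padded = cipher.decrypt(data)                 # decrypt first...
    pad = ord(padded[-1])                         # ...then remove the PKCS#7 padding
    if pad < 1 or pad > 16 or padded[-pad:] != chr(pad) * pad:
        raise ValueError('invalid padding')
    return padded[:-pad]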

As3Crypto (flex) with PyCrypto - how to make AES work?

I have a client in Flex and a server in Python, and I'm trying to make AES work between them, but for some reason it doesn't.
My server code:
import sys
from Crypto.Cipher import AES
from binascii import hexlify, unhexlify

BS = 16
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
unpad = lambda s: s[0:-ord(s[-1])]

def encrypt(str):
    cipher = AES.new(unhexlify('some 64 byte key here'), AES.MODE_CBC, '16 byte iv')
    hex_str = hexlify(cipher.encrypt(pad(str)))
    return hex_str
My client code:
static public function decrypt(txt:String) : String
{
    var k:String = "some 64 byte key here";
    var pad:IPad = new PKCS5();
    var mode:ICipher = Crypto.getCipher("aes-cbc", Hex.toArray(k), pad);
    pad.setBlockSize(mode.getBlockSize());
    var ivmode:IVMode = mode as IVMode;
    ivmode.IV = Hex.toArray(Hex.fromString("16 byte iv"));
    var data:ByteArray = Hex.toArray(Hex.toString(txt));
    mode.decrypt(data);
    return Hex.fromArray(data);
}
It seems like a simple case, but I'm doing something wrong. What is it?
BTW: I get RangeError: Error #2006: The supplied index is out of bounds from mode.decrypt(data);
I finally managed to get it to work.
The client code should look like this:
static public function decrypt(txt:String) : String
{
    var k:String = "some 64 byte key here";
    var pad:IPad = new PKCS5();
    var mode:ICipher = Crypto.getCipher("aes-cbc", Hex.toArray(k), pad);
    pad.setBlockSize(mode.getBlockSize());
    var ivmode:IVMode = mode as IVMode;
    ivmode.IV = Hex.toArray(Hex.fromString("16 byte iv"));
    var data:ByteArray = Hex.toArray(txt);
    mode.decrypt(data);
    return Hex.toString(Hex.fromArray(data));
}
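For completeness, a matching server-side decrypt in Python, sketched against the encrypt code above (keeping the same placeholder key and IV strings):

from Crypto.Cipher import AES
from binascii import unhexlify

unpad = lambda s: s[0:-ord(s[-1])]

def decrypt(hex_str):
    # mirror of encrypt(): hex-decode, AES-CBC decrypt, strip the PKCS#5/7 padding
    cipher = AES.new(unhexlify('some 64 byte key here'), AES.MODE_CBC, '16 byte iv')
    return unpad(cipher.decrypt(unhexlify(hex_str)))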
