I'm reading a file from the frontend and converting it to JSON consists of uuid and a list of JSON objects:
class File(Resource):
def post(self):
if 'file' not in request.files:
return {"message": "No File in the request!"}, 400
file = request.files['file']
if file.filename == '':
return {'message': "No File selected!"}, 400
if file:
filename = secure_filename(file.filename)
#file_contents = file.read()
print(file)
converted_json = File.csv2json(file)
new_request = {
'uuid': str(uuid.uuid4()),
'devices': converted_json
}
print(new_request)
return new_request, 201
#classmethod
def csv_to_json(cls, data):
df = pd.read_csv(data, header=None)
df.columns = ['name', 'username', 'password', 'domain']
print(df)
df = df.to_json(orient='records')
print(df)
return df
Output is double encoded:
{
"uuid": "1a09ad79-dc78-4759-9aa0-f1dda9c08dc4",
"details": "[{\"name\":\"joe\",\"username\":\"admin\",\"password\":\"admin\",\"domain\":\"abc.xyz.com\"}]"
}
How to get JSON Object output like (with newline at the end of each ','):
{
"uuid": "1a09ad79-dc78-4759-9aa0-f1dda9c08dc4",
"details": [
{"name": "joe",
"username": "admin",
"password": "admin",
"domain"" "abc.xyz.com"}
]
}
Never mind!
Using 'json.loads(df)' after 'df = df.to_json(orient='records')' did the trick!
Related
I've got solution to add table without border suggested by Tanaike but I'm still facing issues in indexing.
I want to insert data in the document in the following order (function - insert_data(file_id)) -
Insert an image in a document (Index = 1)
Insert text in a document (index = 2)
Insert table in a document having invisible borders (index = 3)
Insert text in the document (index = 4)
Insert table in a document again having invisible borders (index = 5)
Insert new line (index = 6)
Insert image in a document (index = 7)
The code I'm trying is-
import io
from gdoctableapppy import gdoctableapp
SERVICE_FILENAME = 'C:/Users/XYZ/Testpython/service_account.json' # set path to service account filename
from googleapiclient.discovery import build
from google.oauth2 import service_account
from googleapiclient.http import MediaIoBaseDownload, MediaFileUpload
credentials = service_account.Credentials.from_service_account_file(SERVICE_FILENAME,
scopes=['https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/documents']
)
docs = build('docs', 'v1', credentials=credentials)
drive = build('drive', 'v3', credentials=credentials)
def create_file(file_name):
file_metadata = {
"title": file_name,
"body": {}
}
file = docs.documents().create(body=file_metadata).execute()
print('File ID: %s' % file.get('documentId'))
file_id = file.get('documentId')
try:
permission = {
"role": "writer",
"type": "user",
'emailAddress': 'xyz#gmail.com'
}
result = drive.permissions().create(fileId=file_id, body=permission).execute()
print(result)
return file_id
except Exception as e:
print('An error occurred:', e)
return None
def insert_data(file_id):
requests = []
values = [['Name of the Client/Organization', 'XYZ'], ['Industry', 'Software']]
requests.append(insert_table_data(file_id, values, index=3))
values2 = [['Country', 'India'], ['State', 'UP']]
requests.append(insert_table_data(file_id, values2, index=5))
requests.append(insert_image(index=1))
requests.append(insert_text(2, '\ntext\n'))
requests.append(insert_text(4, '\nDemo text\n'))
requests.append(insert_text(6, '\n'))
requests.append(insert_image(index=7))
result = docs.documents().batchUpdate(documentId=file_id, body={'requests': requests}).execute()
def insert_image(index):
image_data = {
'insertInlineImage': {
'location': {
'index': index
},
'uri':
'https://www.oberlo.com/media/1603970279-pexels-photo-3.jpg?fit=max&fm=jpg&w=1824',
'objectSize': {
'height': {
'magnitude': 350,
'unit': 'PT'
},
'width': {
'magnitude': 350,
'unit': 'PT'
}
}
}
}
return image_data
def insert_text(index, text):
text_data = {
"insertText":
{
"text": text,
"location":
{
"index": index
}
}
}
return text_data
def insert_table_data(file_id, values, index):
documentId = file_id
resource = {
"oauth2": credentials,
"documentId": documentId,
"rows": len(values),
"columns": len(values[0]),
# "append": True,
"createIndex": index,
"values": values,
}
gdoctableapp.CreateTable(resource)
resource = {
"oauth2": credentials,
"documentId": documentId,
}
res = gdoctableapp.GetTables(resource)
obj = {"color": {"color": {}}, "dashStyle": "SOLID", "width": {"magnitude": 0, "unit": "PT"}}
data = {
"updateTableCellStyle": {
"tableCellStyle": {
"borderBottom": obj,
"borderTop": obj,
"borderLeft": obj,
"borderRight": obj,
},
"tableStartLocation": {
"index": res['tables'][-1]['tablePosition']['startIndex']
},
"fields": "borderBottom,borderTop,borderLeft,borderRight"
}
}
# docs.documents().batchUpdate(documentId=documentId, body={'requests': requests}).execute()
return data
def download_as_docx(file_id):
results = drive.files().get(fileId=file_id, fields="id, name, mimeType, createdTime").execute()
docMimeType = results['mimeType']
mimeTypeMatchup = {
"application/vnd.google-apps.document": {
"exportType": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "docExt": "docx"
}
}
exportMimeType = mimeTypeMatchup[docMimeType]['exportType']
# docExt = mimeTypeMatchup[docMimeType]['docExt']
docName = results['name']
request = drive.files().export_media(fileId=file_id,
mimeType=exportMimeType) # Export formats : https://developers.google.com/drive/api/v3/ref-export-formats
# fh = io.FileIO(docName + "." + docExt, mode='w')
fh = io.FileIO(docName, mode='w')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download %d%%." % int(status.progress() * 100))
def download_as_pdf(file_id, file_name):
request = drive.files().export_media(fileId=file_id,
mimeType='application/pdf')
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download %d%%." % int(status.progress() * 100))
fh.seek(0)
filename = file_name.split('.docx')[0] + '.pdf'
with open(filename, 'wb') as fx:
fx.write(fh.getvalue())
def delete_gdrive_file(file_id):
"""Deleted file on Google Drive
:param file_id: ID of Google Drive file
"""
response = drive.files().delete(fileId=file_id).execute()
print(response)
if __name__ == '__main__':
file_name = 'Data.docx'
file_id = create_file(file_name)
insert_data(file_id)
download_as_docx(file_id)
download_as_pdf(file_id, file_name)
delete_gdrive_file(file_id)
Error:
returned "Invalid requests[0].insertTable: Index 4 must be less than the end index of the referenced segment, 2.". Details: "Invalid requests[0].insertTable: Index 4
must be less than the end index of the referenced segment, 2.">
I guess end index of the table goes to 67 but even if I try to insert new data at index 68, it either appends in the last cell of the table or it throws indexing error sometimes.
I should I make the whole data insertion flow dynamic in the google docs.
Modification points:
The library gdoctableapp creates the table by one call. By this, when you request the flow of your question, the index is changed for the tables. I thought that this is the reason of your issue.
In this case, how about the following modification?
Modified script:
Please modify insert_table_data as follows.
def insert_table_data(file_id, values, index):
documentId = file_id
resource = {
"oauth2": credentials,
"documentId": documentId,
"rows": len(values),
"columns": len(values[0]),
# "append": True,
"createIndex": index,
"values": values,
}
gdoctableapp.CreateTable(resource)
And also, please modify insert_data as follows.
def insert_data(file_id):
# Insert texts and images.
index = 1
requests = []
requests.append(insert_image(index))
index += 1
text1 = '\ntext\n'
requests.append(insert_text(index, text1))
index += len(text1)
table1 = index
text2 = '\nDemo text\n'
requests.append(insert_text(index, text2))
index += len(text2)
table2 = index
text3 = '\n'
requests.append(insert_text(index, text3))
index += len(text3)
requests.append(insert_image(index))
docs.documents().batchUpdate(documentId=file_id, body={'requests': requests}).execute()
# Create tables.
values2 = [['Country', 'India'], ['State', 'UP']]
insert_table_data(file_id, values2, table2)
values1 = [['Name of the Client/Organization', 'XYZ'], ['Industry', 'Software']]
insert_table_data(file_id, values1, table1)
# Remove borders of tables.
resource = {"oauth2": credentials, "documentId": file_id}
res = gdoctableapp.GetTables(resource)
obj = {"color": {"color": {}}, "dashStyle": "SOLID", "width": {"magnitude": 0, "unit": "PT"}}
reqs = []
for e in res['tables']:
data = {
"updateTableCellStyle": {
"tableCellStyle": {
"borderBottom": obj,
"borderTop": obj,
"borderLeft": obj,
"borderRight": obj,
},
"tableStartLocation": {
"index": e['tablePosition']['startIndex']
},
"fields": "borderBottom,borderTop,borderLeft,borderRight"
}
}
reqs.append(data)
docs.documents().batchUpdate(documentId=file_id, body={'requests': reqs}).execute()
In this modification, I separate the texts and images, and the tables. By this, the index of tables can be correctly retrieved.
Note:
This modified script is for your question. So when your actual situation is different from your question, this modified script might not be able to be directly used. So please be careful about this.
I am trying to mock some S3 operations and after banging my head against the stubber object, I tried doing something as follows:
def mock_make_api_call(self, operation_name, kwarg):
if operation_name == "ListObjectsV2":
return {
"KeyCount": 1,
"Contents": [
{"Key": "sensor_1", "LastModified": "2021-11-30T12:58:14+00:00"}
],
}
elif operation_name == "GetObjectTagging":
return {"TagSet": []}
elif operation_name == "HeadObject":
return {
"ContentLength": 10,
"ContentType": "gzip",
"ResponseMetadata": {
"Bucket": "1",
},
}
elif operation_name == "GetObject":
content = get_object_response()
return {
"ContentLength": len(content),
"ContentType": "xml",
"ContentEncoding": "gzip",
"Body": content,
"ResponseMetadata": {
"Bucket": "1",
},
}
Ot is the s3 download_fileoperation which is giving me a headache. As far as I can tell it generates, the HeadObjectand GetObjectcalls.
My content generation method is as follows:
def get_object_response():
content = b"<some-valid-xml>"
buf = BytesIO()
compressed = gzip.GzipFile(fileobj=buf, mode="wb")
compressed.write(content)
compressed.close()
return buf.getvalue()
The way it gets used is:
with NamedTemporaryFile() as tmp:
s3_client.download_file(Bucket=..., Key=..., Filename=tmp.name)
However, my test fails with:
elf = <s3transfer.utils.StreamReaderProgress object at 0x116a77820>
args = (262144,), kwargs = {}
def read(self, *args, **kwargs):
> value = self._stream.read(*args, **kwargs)
E AttributeError: 'bytes' object has no attribute 'read'
I simply cannot figure out how to encode the response so that the generated content can be saved.
So, I have an endpoint that works more or less like this:
from flask import Flask, request, jsonify
from flask_cors import CORS
import json
from werkzeug.utils import secure_filename
import os
from mylib import do_stuff
path = os.getcwd()
UPLOAD_FOLDER = os.path.join(path, 'data')
# #load flask
app = Flask(__name__)
CORS(app)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['JSON_AS_ASCII'] = False
print(UPLOAD_FOLDER,flush=True)
#app.route('/upload', methods=['POST'])
def upload():
if request.method == 'POST':
file = request.files['file']
if file:
try:
# Receives a file and saves on the server
filename = secure_filename(file.filename)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
print("saving_here {}".format(file_path))
file.save(file_path)
# The result here is a dict of dicts of dicts
# It consists of a dictionary of DataFrames().to_dict()
result = do_stuff(file_path)
response = app.response_class(
response=json.dumps(result ),
status=200,
mimetype='application/json'
)
return response
except Exception as e:
print(e,flush=True)
return "error"
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port= <PORT>)
The main issue here is that on the front-end sometimes I receive an answer with the "message" key inside data and sometimes I receive one without it(Which is what I expect). The incorrect response:
"response": {
"data": {
"message": "{\"0\": {\"0\": {\"Item\": \"Desinfetante 5L Max Talco Azul\", \"Qtd\": 2, \"UM\": \"GL\", \"Qtd_UM\": \"5L\", \"Qtd_Ttl\": \"10L\"}, \"1\": {\"Item\": \"Caf\\u00e9 A V\\u00e1cuo Tradicional 500G\", \"Qtd\": 10, \"UM\": \"PC\", \"Qtd_UM\": \"500g\", \"Qtd_Ttl\": NaN}}}"
},
"headers": {
"content-type": [
"application/json"
],
"content-length": [
"227"
],
"access-control-allow-origin": [
"*"
],
"server": [
"Werkzeug/1.0.1 Python/3.8.6"
],
"date": [
"Fri, 11 Dec 2020 13:16:32 GMT"
]
},
"status": 200,
"statusText": "OK"
}
}
The expected response (only the data entry):
"response": {
"data": {
"0": {
"0": {
"Pedido": 997,
"Qtd": 5,
"Item": "Água Sanitária 1 Litro",
"Fornecedor": "YYYY"
},
"1": {
"Pedido": 997,
"Qtd": 2,
"Item": "Limpa Vidros Audax Facilita 500ml",
"Fornecedor": "XXXX"
}}}
When I make a post directly from python as in:
import requests
files = {'file': open('<path_to_file>','rb')}
r = requests.post(url="<url>/upload", files = files)
r.json()
Out[12]:
{'0': {'0': {'Item': 'Desinfetante 5L Max Talco Azul',
'Qtd': 2,
'UM': 'GL',
'Qtd_UM': '5L',
'Qtd_Ttl': '10L'},
'1': {'Item': 'Café A Vácuo Tradicional 500G',
'Qtd': 10,
'UM': 'PC',
'Qtd_UM': '500g',
'Qtd_Ttl': nan}}}
r.text
Out[16]: '{"0": {"0": {"Item": "Desinfetante 5L Max Talco Azul", "Qtd": 2, "UM": "GL", "Qtd_UM": "5L", "Qtd_Ttl": "10L"}, "1": {"Item": "Caf\\u00e9 A V\\u00e1cuo Tradicional 500G", "Qtd": 10, "UM": "PC", "Qtd_UM": "500g", "Qtd_Ttl": NaN}}}'
I get the expected json response every time and cannot recreate the issue I have with react, even with the same files and headers.
Things tried:
return json.dumps(result)
return jsonify(resutl)
return response
I found that your response data has \"Qtd_Ttl\": NaN (in the unexpected response you are getting) which is invalid format as a string and that is not parsable to JSON.
So if your data has a valid value for the key "Qtd_Ttl", then you will get the expected result, and if the value is invalid, you will get the response with message key.
This is the reason you are getting a weird format in your frontend.
I think you are using Axios on the frontend.
If you are using Axios, I found that this happens when the JSON response from the server is invalid, use a JSON validator like https://jsonlint.com/ to make sure that your JSON is correctly formatted.
code :
userid1='u123'
userid2='u124'
ids= (userid1,userid2)
fake = Faker('en_US')
for ind in ids:
for idx in range(1):
sms = {
"id": ind ,
"name": fake.name(),
"email": fake.email(),
"gender": "MALE",
}
f_name = '{}.json'.format(ind)
with open(f_name, 'w') as fp:
#Save the dictionary
json.dump(sms, fp, indent=4)
print(sms)
file1 = filename.json ( how to get the *ind* value here i.e., userid)
fd1=open("filename.json")
json_content1 = fd1.read()
fd1.close()
how to open file that has been saved f_name = '{}.json'.format(ind) here . without mentioning the file name manually. file names are saved using ind. so how to use ind here and open the file
this code can help you to get data from json file: you can get any filed from the json data by typing data["name-of-filed"]:
import json
userid1='json_file1'
ids= [userid1]
for ind in ids:
f_name = '{}.json'.format(ind)
with open(f_name, 'r') as outfile:
data = json.loads(outfile.read())
print(data["name"])
print(data)
here is an exemple :
file.json :
{
"name": "Ghassen",
"apiVersion": "v1"
}
output :
Ghassen
{'name': 'Ghassen', 'apiVersion': 'v1'}
I'm new to Python programming, so do bear with me if I make any mistakes anywhere
I'm trying to write a json file using 2 dictionaries and dump the output to the file using the following code on Windows
import json
import sys
import string
from time import strftime
scan_results = open("scan_results.txt", "r")
saved = sys.stdout
f = file('report.json', 'wb')
sys.stdout = f
for line in scan_results:
if ".jpg" in line:
lst = []
result = line.split('\\')
result_split = result[5].split(' ')
filename = result_split[0]
raw_status = result_split[3]
if "OK" in raw_status:
status = "Okay"
status_code = "0"
dict = {'FileName': filename, 'DateTime': strftime("%Y-%m-%d %H:%M:%S"), 'statusCode': status_code, 'Description': status}
dict2 = {filename : dict}
print json.dumps(dict2)
sys.stdout = saved
f.close()
print "JSON report written"
The problem is, the output that I have is
{
"car-30537.jpg": {
"statusCode": "0",
"DateTime": "2012-02-07 09:52:26",
"Description": "Okay",
"FileName": "car-30537.jpg"
}
}{
"car-30538.jpg": {
"statusCode": "0",
"DateTime": "2012-02-07 09:52:26",
"Description": "Okay",
"FileName": "car-30538.jpg"
}
}
whereas the output that I want is
{
"car-30537.jpg": {
"statusCode": "0",
"DateTime": "2012-02-07 09:52:26",
"Description": "Okay",
"FileName": "car-30537.jpg"
},
{
"car-30538.jpg": {
"statusCode": "0",
"DateTime": "2012-02-07 09:52:26",
"Description": "Okay",
"FileName": "car-30538.jpg"
}
}
Is there any ways to correct this problem? Thanks in advance
You are making lots of dicts, while you only need one main containing one:
import json
import sys
import string
from time import strftime
scan_results = open("scan_results.txt", "r")
saved = sys.stdout
f = file('report.json', 'wb')
sys.stdout = f
dict2 = {} #Create one output dict
for line in scan_results:
if ".jpg" in line:
lst = []
result = line.split('\\')
result_split = result[5].split(' ')
filename = result_split[0]
raw_status = result_split[3]
if "OK" in raw_status:
status = "Okay"
status_code = "0"
dict2[filename] = {'FileName': filename, 'DateTime': strftime("%Y-%m-%d %H:%M:%S"), 'statusCode': status_code, 'Description': status} #Add to that dict.
print json.dumps(dict2) #Print it out at the end.
sys.stdout = saved
f.close()
print "JSON report written"
I added comments to modified lines.