I need some help. I am trying to save a .csv file into a local folder using Python, but it is storing a blank file. My code is below.
views.py:
report = Reactor.objects.all()
filename = str(uuid.uuid4()) + '.csv'
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=' + filename
with open(settings.FILE_PATH + filename, 'w') as csv_file:
    file_writer = csv.writer(csv_file)
    response_writer = csv.writer(response)
    file_writer.writerow(['Name', 'Status', 'Date'])
    response_writer.writerow(['Name', 'Status', 'Date'])
    for rec in report:
        if rec.status == 1:
            status = 'Start'
        if rec.status == 0:
            status = 'Stop'
        if rec.status == 2:
            status = 'Suspend'
        file_writer.writerow([rec.rname, status, rec.date])
        response_writer.writerow([rec.rname, status, rec.date])
return response
settings.py:
FILE_PATH = os.getcwd()+'/upload/'
Here I am also downloading the file, and I need to save that file into the folder, but a blank file is being stored. Please help me.
As jasonharper said, you are only writing the CSV data to the response and not to the file on disk. Start by creating another writer object:
file_writer = csv.writer(open(settings.FILE_PATH + filename, 'w'))
Now, each time you call writerow on the response writer, also do the same with file_writer:
file_writer.writerow(['Name', 'Status', 'Date'])
...
file_writer.writerow([rec.rname, status, rec.date])
It is best to use the with statement to let Python automatically close the file:
with open(settings.FILE_PATH + filename, 'w') as csv_file:
    file_writer = csv.writer(csv_file)
    response_writer = csv.writer(response)
    file_writer.writerow(['Name', 'Status', 'Date'])
    response_writer.writerow(['Name', 'Status', 'Date'])
    for rec in report:
        ...
        file_writer.writerow([rec.rname, status, rec.date])
        response_writer.writerow([rec.rname, status, rec.date])
return response
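One extra point worth checking (my addition, not part of the original answer): if the upload/ directory does not exist yet, open() raises FileNotFoundError and no file is written at all. A minimal sketch, assuming the same settings.FILE_PATH and filename as above:

import os

os.makedirs(settings.FILE_PATH, exist_ok=True)          # create upload/ if it is missing
file_path = os.path.join(settings.FILE_PATH, filename)  # safer than string concatenation
with open(file_path, 'w', newline='') as csv_file:
    file_writer = csv.writer(csv_file)
    ...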
def test_upload_csv_success(self):
    """Test uploading a csv file"""
    with open("innovators.csv", "w") as file:
        writer = csv.writer(file)
        writer.writerow(["SN", "Name", "Contribution"])
        writer.writerow([1, "Linus Torvalds", "Linux Kernel"])
        writer.writerow([2, "Tim Berners-Lee", "World Wide Web"])
        writer.writerow([3, "Guido van Rossum", "Python Programming"])
    with open("innovators.csv", "r") as file:
        res = self.client.post(
            CSV_URL, {"file": file}, content_type="multipart/form-data"
        )
        file.close()
    self.assertEqual(res.status_code, status.HTTP_201_CREATED)
    # self.assertIn('file', res.data)
    # self.assertTrue(os.path.exists(self.csv_model.file.path))
Below is the error I'm getting:
System check identified no issues (0 silenced).
.F.
FAIL: test_upload_csv_success (core.tests.test_csv_api.CsvUploadTests)
Test uploading a csv file
Traceback (most recent call last):
File "/Users/rounaktadvi/django_rest_api_projects/csv-store-api/core/tests/test_csv_api.py", line 56, in test_upload_csv_success
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
AssertionError: 400 != 201
I figured it out, here's what I did:
#patch("pandas.read_csv")
#patch("pandas.DataFrame.to_sql")
def test_upload_csv_success(self, mock_read_csv, mock_to_sql) -> None:
"""Test uploading a csv file"""
file_name = "test.csv"
# Open file in write mode (Arrange)
with open(file_name, "w") as file:
writer = csv.writer(file)
# Add some rows in csv file
writer.writerow(["name", "area", "country_code2", "country_code3"])
writer.writerow(
["Albania", 28748, "AL", "ALB"],
)
writer.writerow(
["Algeria", 2381741, "DZ", "DZA"],
)
writer.writerow(
["Andorra", 468, "AD", "AND"],
)
# open file in read mode
data = open(file_name, "rb")
# Create a simple uploaded file
data = SimpleUploadedFile(
content=data.read(), name=data.name, content_type="multipart/form-data"
)
# Perform put request (Act)
res = self.client.put(CSV_URL, {"file_name": data}, format="multipart")
# Mock read_csv() and to_sql() functions provided by pandas module
mock_read_csv.return_value = True
mock_to_sql.return_value = True
# Assert
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertEqual(res.data, "Data set uploaded")
# Delete the test csv file
os.remove(file_name)
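One detail worth calling out (my note, not from the original post): stacked @patch decorators are applied bottom-up, so the decorator nearest the def supplies the first mock argument after self. A tiny illustration:

from unittest.mock import patch

@patch("pandas.read_csv")           # outer decorator -> second mock argument
@patch("pandas.DataFrame.to_sql")   # inner decorator -> first mock argument
def test_example(self, mock_to_sql, mock_read_csv):
    ...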
I am trying to create an Excel file using pandas and serve it to the user as a downloadable file via Django. I put together some different answers on the topic that I found on here and ended up with this code:
collection = [{"title": "something", "price": 34, "quantity": 23}, {..}]
output = BytesIO()
df = pd.DataFrame(collection, columns=['title', 'price', 'quantity'])
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1')
writer.save()
output.seek(0)
workbook = output.getvalue()
response = StreamingHttpResponse(workbook, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = f'attachment; filename={output_name}.xlsx'
return response
It all works well until I try to open the resulting file: I get an error saying that the file is damaged or that there is something wrong with the data format. I suspect it could have something to do with the data being binary? How can I resolve this issue?
SOLUTION
Turns out I had to remove some stuff so the code looks like this now and works fine:
collection = [{"title": "something", "price": 34, "quantity": 23}, {..}]
output = BytesIO()
df = pd.DataFrame(collection, columns=['title', 'price', 'quantity'])
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1')
writer.save()
output.seek(0)
# workbook = output.getvalue()
response = StreamingHttpResponse(output, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = f'attachment; filename={output_name}.xlsx'
return response
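For what it's worth, the likely reason the original version produced a damaged file (my explanation, not from the original post): output.getvalue() returns a bytes object, and iterating over bytes in Python 3 yields integers, so StreamingHttpResponse ends up encoding each byte as its decimal string. Passing the BytesIO object itself yields bytes chunks, which stream through unchanged. A tiny illustration:

from io import BytesIO

data = b"PK\x03\x04"        # the first bytes of any .xlsx (zip) file
print(list(data))           # [80, 75, 3, 4] -> integers, which get mangled
print(list(BytesIO(data)))  # [b'PK\x03\x04'] -> bytes chunks, streamed as-is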
I think you might be making that a lot more complicated than it needs to be.
Below works fine for me:
import pandas as pd
from django.http import HttpResponse
df = pd.DataFrame(data)
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename="filename.xlsx"'
df.to_excel(response)
return response
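For context, a minimal sketch of the same idea as a complete view (the data list and view name here are placeholders, not from the original answer):

import pandas as pd
from django.http import HttpResponse

def export_excel(request):
    data = [{"title": "something", "price": 34, "quantity": 23}]
    df = pd.DataFrame(data)
    response = HttpResponse(
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    response['Content-Disposition'] = 'attachment; filename="filename.xlsx"'
    df.to_excel(response)
    return response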
This is probably a datatype issue when opening in Excel; try converting the data to strings before creating the Excel file.
Another thought is to create the file with a sample set of records rather than the whole frame, to validate whether it is a data issue. There might be an issue with NaNs in the dataset as well; check whether you need to ignore, convert, or replace them.
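A hypothetical illustration of both suggestions (reusing the column names from the question; this is not the asker's code): cast everything to strings and replace NaNs before writing, and try a small slice of the frame first.

df = pd.DataFrame(collection, columns=['title', 'price', 'quantity'])
df = df.fillna('').astype(str)                      # replace NaNs and force string dtype
df.head(10).to_excel(writer, sheet_name='Sheet1')   # write a sample of records first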
I am a Python noob, and I'm working with the Plaid API to get bank transactions. I would like each transaction to be its own line, and I only want to pull four values per record: date, _account, name and amount, and populate a CSV file with that data. I have the code below, which populates a single-line CSV (JSON file also attached). I can't seem to find examples of how to do this after a bit of Googling. Any help is much appreciated.
import csv

# Configuration
from plaid import Client

Client.config({
    'url': 'https://api.plaid.com'
})

# Connect to Plaid
from plaid import Client
from plaid import errors as plaid_errors
from plaid.utils import json

client = Client(client_id='test_id', secret='test_secret')
account_type = 'suntrust'

try:
    response = client.connect(account_type, {
        'username': 'plaid_test',
        'password': 'plaid_good'
    })
except plaid_errors.PlaidError:
    pass
else:
    connect_data = response.json()

# Get transactions from Plaid
response = client.connect_get()
transactions = response.json()

# Save the transactions JSON response to a csv file in the Python Projects directory
with open('transactions.csv', 'w') as outfile:
    json.dump(transactions, outfile)

csvfile = open('transactions.csv', 'r')
jsonfile = open('transactions.json', 'w')
fieldnames = ("date", "_account", "name", "amount")
reader = csv.DictReader(csvfile, fieldnames)
for row in reader:
    json.dump(row, jsonfile)
    jsonfile.write('\n')
JSON FILE
I think you are making this over-complicated and confusing JSON with CSV. Hat tip to @thalesmallo, who beat me to the punch on using the DictWriter class. Try this:
import csv
from plaid import Client
from plaid import errors as plaid_errors
from plaid.utils import json

Client.config({
    'url': 'https://api.plaid.com'
})

# Connect to Plaid
client = Client(client_id='test_id', secret='test_secret')
account_type = 'suntrust'

try:
    response = client.connect(account_type, {
        'username': 'plaid_test',
        'password': 'plaid_good'
    })
except plaid_errors.PlaidError:
    pass
else:
    connect_data = response.json()

response = client.connect_get()
data = response.json()
transactions = data['transactions']  # see https://plaid.com/docs/api/#data-overview

# Save the transactions JSON response to a csv file in the Python Projects directory
header = ("date", "_account", "name", "amount")
with open('transactions.csv', 'w') as f:
    writer = csv.DictWriter(f, fieldnames=header, extrasaction='ignore')
    writer.writeheader()
    for x in transactions:
        writer.writerow(x)
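One small note (my addition, not from the original answer): on Windows, opening the CSV without newline='' can produce blank lines between rows; the csv module documentation recommends:

with open('transactions.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=header, extrasaction='ignore')
    writer.writeheader()
    for x in transactions:
        writer.writerow(x)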
I am using Django 1.8 and Python 3.4. I am trying to create a JSON file, write to it, and then save it to my database, but on save it returns the error '_io.TextIOWrapper' object has no attribute '_committed'. Can anyone please help with where I am going wrong?
Here is my models.py
class ConvertedFile(models.Model):
    file = models.FileField(upload_to='json/upload', max_length=5000)
    created_on = models.DateTimeField(auto_now_add=True)
My views.py is:
def convert_file(request):
    url = request.GET.get('q', None)
    r = requests.get(url, stream=True)
    with open('file.csv', 'wb') as out_file:
        shutil.copyfileobj(r.raw, out_file)
    csvfile = open("file.csv", "r")
    jsonfile = open("file.json", "w")
    csv_rows = []
    reader = csv.DictReader(csvfile)
    title = reader.fieldnames
    try:
        for row in reader:
            csv_rows.extend([{title[i]: row[title[i]] for i in range(len(title))}])
    except:
        pass
    jsonfile.write(json.dumps(csv_rows, sort_keys=False, indent=4, separators=(',', ': '), ensure_ascii=False))
    os.remove("file.csv")
    jsonfile.close()
    new_json = ConvertedFile.objects.create()
    new_json.file = jsonfile
    new_json.save()
The error is raised on model.save() in the last line, right? The line above it, new_json.file = jsonfile, is the problem. You are passing a reference to a closed (plain Python) file object to Django's FileField, and it does not know how to deal with it (_committed is missing, for example).
Have a look at Django - how to create a file and save it to a model's FileField?
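For reference, a minimal sketch of one way around it (my sketch, not the original answer's code): wrap the JSON string in Django's ContentFile so the FileField knows how to store it, instead of assigning a plain file object.

from django.core.files.base import ContentFile

json_data = json.dumps(csv_rows, sort_keys=False, indent=4,
                       separators=(',', ': '), ensure_ascii=False)
new_json = ConvertedFile()
new_json.file.save('file.json', ContentFile(json_data), save=True)  # stores under upload_to and saves the model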
I want to prompt the browser to save a CSV using pyramid.response.Response. I searched for clues and found a Django answer (here's a link), but I can't use it with Pyramid WSGI. My code looks like this:
from pyramid.response import Response

def get_list_names_emails(request):
    session, env = request.db, request.client_env
    response = Response(content_type='text/csv')
    output = StringIO()
    writer = csv.writer(output)
    writer.writerow(['SomeName', 'SomeEmail', 'CompanyName'])
    csv_output = output.getvalue()
    return csv_output
As a cleaner way to do that, you can register a renderer.
In your configuration set-up, add:
config.add_renderer(name='csv',
                    factory='mypackage.renderers.CSVRenderer')
then in mypackage/renderers.py:
import csv
import StringIO  # Python 2; on Python 3 use io.StringIO

class CSVRenderer(object):
    def __init__(self, info):
        pass

    def __call__(self, value, system):
        fout = StringIO.StringIO()
        writer = csv.writer(fout, delimiter=';', quoting=csv.QUOTE_ALL)
        writer.writerow(value['header'])
        writer.writerows(value['rows'])

        resp = system['request'].response
        resp.content_type = 'text/csv'
        resp.content_disposition = 'attachment;filename="report.csv"'
        return fout.getvalue()
After that, you can decorate your view with the renderer:
@view_config(..., renderer='csv')
def myview(self):
    header = ['name', 'surname', 'address']
    rows = [
        (
            row['name'],
            row['surname'],
            row['address'],
        )
        for row in query_rows(.....)
    ]
    return {
        'header': header,
        'rows': rows
    }
The advantage of this approach is more testable view code (you just check the dictionary values, no need to parse anything), and you can also add an XLS or any other renderer to the same view:
@view_config(..., renderer='xls')
@view_config(..., renderer='csv')
def myview(self):
    ...
Try adding Content-Disposition:
response['Content-Disposition'] = 'attachment; filename="report.csv"'
It's better to set the content type as well:
response['Content-type'] = 'text/csv'
response['Content-Disposition'] = 'attachment; filename="report.csv"'
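If dict-style header access does not work on your Pyramid Response object, the same headers can be set through the Response constructor instead; a minimal sketch (the view body is assumed, not taken from the question):

import csv
from io import StringIO
from pyramid.response import Response

def get_list_names_emails(request):
    output = StringIO()
    writer = csv.writer(output)
    writer.writerow(['SomeName', 'SomeEmail', 'CompanyName'])
    return Response(
        body=output.getvalue(),
        content_type='text/csv',
        content_disposition='attachment; filename="report.csv"',
    )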