Writing data to files after a few iterations in Python

I want to write data into five files on every fifth iteration. Is there any way to do that? I am confused about how to fetch the past data.
i=1
while 1:
data = random.randint(0,100)
print(data)
if(i%5==0):
with open('D:\mydata\my%d.csv'%(i-4),'D:\mydata\my%d.csv'%(i-3), "w") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
level_counter = 0
max_levels = 1
while level_counter < max_levels:
filename1 = data
writer.writerow(("No load", filename1))
level_counter = level_counter +1
print("done")
i=i+1
time.sleep(2)

Just use a list to store data from the past 5 iterations:
i = 1
past_data = []
while True:
data = random.randint(0, 100)
past_data.append(data)
if i % 5 == 0:
...
past_data = []
i += 1

Related

How to re-write a while statement as an if statement in Python 2.7?

I wrote a script that searches an excel document for 'X', and when it finds an 'X' it copies the first column and first row associated with the 'X' into a CSV file.
I've been told that there's a better way to do this with 'if' statements. Not quite sure how.
Here's the code:
import xlrd
import csv
###Grab the data from sheet 1
def get_row_values(workSheet, row):
    """Return the values of every cell in *row* of *workSheet* as a list.

    workSheet: an xlrd worksheet (anything exposing ``.ncols`` and
        ``.cell_value(row, col)``).
    row: zero-based row index to read.

    Bug fix: the original body read the module-level global ``myWorksheet``
    instead of the ``workSheet`` parameter, so the argument passed in was
    silently ignored.
    """
    # One cell per column, in column order (replaces the manual while loop).
    return [workSheet.cell_value(row, col) for col in range(workSheet.ncols)]
file_path = 'foo.xlsx'
output = []
#Write the data
myWorkbook = xlrd.open_workbook(file_path)
myWorksheet = myWorkbook.sheet_by_name('foosheet')
num_rows = myWorksheet.nrows - 1
curr_row = 0
column_names = get_row_values(myWorksheet, curr_row)
#print("TOTAL ENTRIES:")
#print len(column_names)
#print("-----")
framework_name = myWorksheet.cell(0,2)
framework_version = myWorksheet.cell(0,3)
while curr_row < num_rows:
curr_row += 1
row = myWorksheet.row(curr_row)
this_row = get_row_values(myWorksheet, curr_row)
x = 0
while x <len(this_row):
if this_row[x] == 'x':
output.append(['', fooname, foo_version,
foo_names[x], foo_row[0]])
myData = [["foo1", "foo2",
"foo3", "foo4", "foo5"]]
myFile = open('./results/barTemp.csv', 'w')
with myFile:
writer = csv.writer(myFile)
writer.writerows(myData)
writer.writerows(output)
x += 1
#print output
myFile.close()
myWorkbook.release_resources()
It's not necessarily better — it has the same runtime complexity.
The difference would be a more compact line:
For example, you can change
while x < len(this_row):
to
for x in this_row:
but I see that you use the 'x' index to find column_names[x] so another approach might be better such as
for x in range(len(this_row)):

Sorting Columns within csv files(Python)

Having issues with code. I was given a file called "racing.csv" that stores the variables found in the "Drive" class. The concept behind the problem is that the program should sort the racetimes (lowest to highest) and assign points to the top 3 racers, then export this data to a new file. All code is working fine aside from when I call shortBubbleSort on Drive, which isn't sorting the racetimes correctly. Help is appreciated.
import csv
class Drive(object):
    """One race entry: a driver's name, team, race time and awarded points."""

    # Class-level defaults, kept so ``Drive.driver`` etc. remain accessible.
    driver = ""
    team = ""
    racetime = 0.0
    points = 0

    def __init__(self, driver, team, racetime, points):
        """Record the given race data on this instance."""
        self.points = points
        self.racetime = racetime
        self.team = team
        self.driver = driver
f = open('racing.csv', 'r')
csv_f = list(csv.reader(f))
driverclasses = []
for i in range(len(csv_f)):
d = Drive(csv_f[i][0],csv_f[i][1],csv_f[i][2],csv_f[i][3])
driverclasses.append(d)
for row in csv_f:
print (row)
for x in range(0, 6):
csv_f[x][2]=(input("Enter Racetime"))
def shortBubbleSort(alist):
    """Bubble-sort *alist* in place, stopping early once a pass makes no swaps."""
    swapped = True
    passnum = len(alist) - 1
    while swapped and passnum > 0:
        swapped = False
        for j in range(passnum):
            if alist[j] > alist[j + 1]:
                # Tuple swap instead of a temporary variable.
                alist[j], alist[j + 1] = alist[j + 1], alist[j]
                swapped = True
        passnum -= 1
shortBubbleSort(Drive)
print(csv_f)
csv_f[0][3] = 25
csv_f[1][3] = 18
csv_f[2][3] = 15
f = open('RacingResults.csv', 'w')
for row in csv_f:
print (row)
Does this help?
**range function syntax**: `range([start], stop[, step])`
start: Starting number of the sequence.
stop: Generate numbers up to, but not including this number.
step: Difference between each number in the sequence.
def shortBubbleSort(alist):
    """Sort *alist* in place with a plain bubble sort.

    Each pass bubbles the largest remaining element to the end, so the
    unsorted prefix shrinks by one element per pass.
    """
    for unsorted_len in reversed(range(1, len(alist))):
        for left in range(unsorted_len):
            right = left + 1
            if alist[left] > alist[right]:
                alist[left], alist[right] = alist[right], alist[left]

Opening a file for append error

I'm trying to open a file for appending, but I keep getting the "except" portion of my try/except block, meaning there is some sort of error with the code but I can't seem to find what exactly is wrong with it. It only happens when I try to open a new file like so:
results = open("results.txt", "a")
results.append(score3)
Here's my full code:
import statistics
# input
filename = input("Enter a class to grade: ")
try:
# open file name
open(filename+".txt", "r")
print("Succesfully opened", filename,".txt", sep='')
print("**** ANALYZING ****")
with open(filename+".txt", 'r') as f:
counter1 = 0
counter2 = 0
right = 0
answerkey = "B,A,D,D,C,B,D,A,C,C,D,B,A,B,A,C,B,D,A,C,A,A,B,D,D"
a = []
# validating files
for line in f:
if len(line.split(',')) !=26:
print("Invalid line of data: does not contain exactly 26 values:")
print(line)
counter2 += 1
counter1 -= 1
if line.split(",")[0][1:9].isdigit() != True:
print("Invalid line of data: wrong N#:")
print(line)
counter2 += 1
counter1 -= 1
if len(line.split(",")[0]) != 9:
print("Invalid line of data: wrong N#:")
print(line)
counter2 += 1
counter1 -= 1
counter1 += 1
#grading students
score = len(([x for x in zip(answerkey.split(","), line.split(",")[1:]) if x[0] != x[1]]))
score1 = 26 - score
score2 = score1 / 26
score3 = score2 * 100
a.append(score3)
# results file
results = open("results.txt", "a")
results.write(score3)
# in case of no errors
if counter2 == 0:
print("No errors found!")
# calculating
number = len(a)
sum1 = sum(a)
max1 = max(a)
min1 = min(a)
range1 = max1 - min1
av = sum1/number
# turn to int
av1 = int(av)
max2 = int(max1)
min2 = int(min1)
range2 = int(range1)
# median
sort1 = sorted(a)
number2 = number / 2
number2i = int(number2)
median = a[number2i]
median1 = int(median)
# mode
from statistics import mode
mode = mode(sort1)
imode = int(mode)
# printing
print ("**** REPORT ****")
print ("Total valid lines of data:", counter1)
print ("Total invalid lines of data:", counter2)
print ("Mean (average) score:", av1)
print ("Highest score:", max2)
print("Lowest score:", min2)
print("Range of scores:", range2)
print("Median Score:", median1)
print("Mode score(s):", imode)
results.close()
except:
print("File cannot be found.")
There is no method called `append` for writing to a file object. Use the `write` or `writelines` methods to write. Since you already opened the file in append mode, it won't change the old data — it will append the new text to the file.
f=open('ccc.txt','a')
f.write('Hellloooo')
f.close()
Hope it helps.

How to read/processes 100 lines at a time from 10000 lines in python?

How to pass/processes 100 lines or lower to try: at a time ?
receipt_dict = {}
with open("data.txt", "r") as plain_text: // ** 10000+ lines **
for line in plain_text:
hash_value = line.strip()
receipt_dict[hash_value] = 1
try:
bitcoind.sendmany("", receipt_dict) // **here must loop 100 at a time**
With generators. Here, load_data_chunks accumulates data in receipt_dict until its size exceeds chunk_size and yields it back to main loop below.
def load_data_chunks(fname, chunk_size):
    """Yield dicts of at most *chunk_size* hash keys read from *fname*.

    Each line of the file is stripped and used as a key mapping to 1
    (duplicate lines therefore collapse into one key). The final, possibly
    smaller, chunk is yielded only when it is non-empty.

    Bug fixes vs. the original:
    - dropped the unused ``path`` parameter, which made the shown call
      ``load_data_chunks("data.txt", 100)`` raise a TypeError;
    - yield when the chunk *reaches* chunk_size (the original used ``>``,
      producing chunks of chunk_size + 1 entries);
    - skip the trailing yield when the last chunk is empty.
    """
    receipt_dict = {}
    with open(fname, "r") as plain_text:
        for line in plain_text:
            receipt_dict[line.strip()] = 1
            if len(receipt_dict) >= chunk_size:
                yield receipt_dict
                receipt_dict = {}
    if receipt_dict:
        yield receipt_dict
for chunk in load_data_chunks("data.txt", 100):
try:
...
Process it as a list of dictionaries, keeping track of the size of each dictionary:
# Build a list of dicts, each holding at most 100 stripped hash lines.
# Bug fix: the original declared the list as ``receipt_dicts`` but then
# appended to the undefined name ``receipt_dict``, raising a NameError.
receipt_dicts = []
current_dict = {}
with open("data.txt", "r") as plain_text:  # ** 10000+ lines **
    for line in plain_text:
        if len(current_dict) == 100:
            receipt_dicts.append(current_dict)
            current_dict = {}
        current_dict[line.strip()] = 1
# Keep the final partial chunk, but don't append an empty dict
# (e.g. when the file is empty or its size is a multiple of 100).
if current_dict:
    receipt_dicts.append(current_dict)
You can then loop through this list and process one dictionary at a time.

Loop through csv rows and check for a specific value

Hello I got a question regarding loops. The situation now is that I got a csv file where I check whether in column3 (row[2]) the value "1" is present. If not just skip it and loop again with add up value:
i = 1
maxuserid = 7255
result_liked = []
with open('source/to/file/user_id%i.csv' %i,'r') as fin:
for row in csv.reader(fin, delimiter='\t'):
if int(row[2]) >= 1:
result_liked.append(row)
i += 1
else:
i += 1
#more code
The thing is that I need a for loop that runs all the code and after the run is completed add the value "1" up to my i variable.
The goal of my code is to run the whole loop body, and after it is done increment i from 1 to 2 and run the loop again, until the maxuserid of 7255 is reached. How can I get a loop that does this from 1 to 7255?
EDIT:
import csv
maxuserid = 7255
result_liked = []
for i in range(maxuserid):
with open('source/to/file/user_id%i.csv' %(i+1),'r') as fin:
for row in csv.reader(fin, delimiter='\t'):
if int(row[2]) >= 1:
result_liked.append(row)
training_data = result_liked[:2]
test_data = result_liked[2:]
training_data_bookid = [el[1] for el in training_data]
test_data_bookid = [el[1] for el in test_data]
#training_data_bookid_int = map(int, training_data_bookid) #python2
training_data_bookid_int = list(map(int, training_data_bookid)) #python3
test_data_bookid_int = list(map(int, test_data_bookid)) #python3
books_list = []
for j in range(0,2):
with open('source/to/file/output_new.csv', 'rt') as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
for row in reader:
get_book_id = training_data_bookid_int[j]
if get_book_id == int(row[0]):
books_list.append([row[2],row[1]])
b = sorted(books_list, reverse=True, key=lambda x:int(x[0]))
c = [el[1] for el in b]
c_int = list(map(int, c))
check_training_vs_test = set(c_int) & set(test_data_bookid_int)
with open("result.txt", "a") as text_file:
text_file.write("Userid: %i || Liked: %s || Test: %f" % (i, len(test_data), len(check_training_vs_test)))
Try following code
maxuserid = 7255
result_liked = []
for i in range(maxuserid): # this loop iterates through all users files
with open('source/to/file/user_id%d.csv' % (i+1),'r') as fin:
for row in csv.reader(fin, delimiter='\t'):
if int(row[2]) >= 1:
result_liked.append(row)
Update
I think you need something like:
maxuserid = 7255
for i in range(maxuserid):
result_liked = [] # form a separate list for each csv file
with open('source/to/file/user_id%i.csv' %(i+1),'r') as fin:
for row in csv.reader(fin, delimiter='\t'):
if int(row[2]) >= 1:
result_liked.append(row)
if len(result_liked) < 3: # if list too few elements just go to next file
continue
training_data = result_liked[:2]
test_data = result_liked[2:]
...

Categories

Resources