Ping using Python and save to a file [duplicate]

Below is code to ping hosts and create a CSV file from the results:
import os

for i in range(0, 255):
    for j in range(1, 254):
        hostname = "10.222.{0}.{1}".format(i, j)
        response = os.system("ping -n 1 " + hostname)
        if response == 0:
            fp = open("C:\\Users\\anudeepa\\Desktop\\hostname.csv", 'w')
            fp.writelines(hostname + "host up\n")
        else:
            fp = open("C:\\Users\\anudeepa\\Desktop\\hostname.csv", 'w')
            fp.write(hostname + "host dead\n")
This code lets me ping the hosts, but when writing the results to the CSV it overwrites what was written before, so only the penultimate or ultimate result ends up in the file.

Change both occurrences of
fp = open("C:\\Users\\anudeepa\\Desktop\\hostname.csv", 'w')
to
fp = open("C:\\Users\\anudeepa\\Desktop\\hostname.csv", 'a')
in order to open the file in append mode.
You can also improve your code by using with, so you don't reopen the file on every iteration:
import os

with open("C:\\Users\\anudeepa\\Desktop\\hostname.csv", 'a') as fp:
    for i in range(0, 255):
        for j in range(1, 254):
            hostname = "10.222.{0}.{1}".format(i, j)
            response = os.system("ping -n 1 " + hostname)
            if response == 0:
                fp.write(hostname + " host up\n")
            else:
                fp.write(hostname + " host dead\n")
This will also have the benefit of closing the file when the script ends.
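As a side note (an assumption on my part, not part of the original answer), here is a rough cross-platform sketch using subprocess and the csv module; ping's count flag is -n on Windows but -c on Unix-like systems, and the output path here is a placeholder:
import csv
import platform
import subprocess

flag = "-n" if platform.system() == "Windows" else "-c"
with open("hostname.csv", "a", newline="") as fp:
    writer = csv.writer(fp)
    for i in range(0, 255):
        for j in range(1, 254):
            hostname = "10.222.{0}.{1}".format(i, j)
            # Discard ping's console output; only the exit code matters.
            result = subprocess.run(["ping", flag, "1", hostname],
                                    stdout=subprocess.DEVNULL,
                                    stderr=subprocess.DEVNULL)
            writer.writerow([hostname, "up" if result.returncode == 0 else "dead"])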


how do I make python save each output without overwriting the file? [duplicate]

I am practicing website hacking and I have this script. I want it to save every finding to one file; currently it replaces the previous finding with the new one.
import requests

def request(url):
    try:
        return requests.get("http://" + url)
    except requests.exceptions.ConnectionError:
        pass
    except requests.exceptions.InvalidURL:
        pass
    except requests.urllib3.exceptions.LocationParseError:
        pass

target_url = "192.168.1.39/mutillidae/"
with open("/home/kali/PycharmProjects/websitesub/common.txt", "r") as wordlist_file:
    for line in wordlist_file:
        word = line.strip()
        test_url = target_url + "/" + word
        response = request(test_url)
        if response:
            with open("/home/kali/PycharmProjects/websitesub/output.txt", "w") as f:
                f.write("DIR" + test_url)
            print("DIR" + test_url)
You can define a new output filename every time you save the file, for example by appending the count of files already in the directory:
import os

dir_path = r'/home/kali/PycharmProjects/websitesub'
count = len([entry for entry in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, entry))])
filename = f"output_{count}.txt"
output_file = os.path.join(dir_path, filename)
if response:
    with open(output_file, 'w') as f:
        .....
This should work:
with open("data1.txt", "a", encoding="utf-8") as file:
    file.write("DIR " + test_url + "\n")  # the thing you want to write
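Alternatively, a minimal sketch of the smallest change to the question's script: open the output file once in append mode before the loop (the paths, target_url, and the request() helper are as defined in the question):
# Open the output file once, in append mode, so findings accumulate
# instead of replacing one another.
with open("/home/kali/PycharmProjects/websitesub/output.txt", "a") as f:
    with open("/home/kali/PycharmProjects/websitesub/common.txt", "r") as wordlist_file:
        for line in wordlist_file:
            word = line.strip()
            test_url = target_url + "/" + word
            response = request(test_url)  # request() from the question
            if response:
                f.write("DIR " + test_url + "\n")
                print("DIR " + test_url)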

Print variable to txt file [duplicate]

I am working on writing the output data from str(velocity) to a separate txt file. It currently prints fine in the terminal, but I have had no success printing it to a txt file.
import socket
import numpy as np
import pigpio

# ------------------------ Defining functions and variables ------------------ #
pi = pigpio.pi()
pi.set_mode(21, pigpio.INPUT)
pulseDegrees = 2*np.pi/38
T_old = 0
count = 0

def cbf(g, L, t):
    global velocity, T_old
    T_new = t
    velocity = pulseDegrees/(T_new-T_old)*(1/0.000001)
    print(str(velocity))
    T_old = T_new

def sendData():
    conn.recv(1024)
    conn.send(str(velocity).encode('UTF-8'))

# -------------------------- Communication -------------------- #
# The unit's IP address
print("Awaiting connection")
port = 5555
s = socket.socket()
s.bind(('', port))
s.listen(1)
(conn, addr) = s.accept()
print("Connected to " + str(addr))

# -------------------------- Main loop -------------------- #
cb = pi.callback(21, pigpio.RISING_EDGE, cbf)
while True:
    sendData()
Use open(file, mode) with the pathname of the file as file and "w" as mode to open the file for writing. Then call file.write(data) with data as a format string such as "%s %d", followed by % and a tuple containing the variable's name and the variable itself. Hope this works!
You can try
file = open("fileName.txt", "w")
file.write(str(velocity) + "\n")
file.close()
"w" in the first line means you are writing to the file. Adding "\n" marks the end of the line you wrote.

Python for loop only goes through once

I'm writing a script to search through multiple text files containing MAC addresses to find which port each one is associated with. I need to do this for several hundred MAC addresses. The function runs fine the first time through. After that, though, the new MAC address doesn't seem to get used by the function (it behaves as if it still has the one it already searched for), and the function's for loop only seems to run once.
import re
import csv

f = open('all_switches.csv', 'U')
source_file = csv.reader(f)
m = open('macaddress.csv', 'wb')
macaddress = csv.writer(m)
s = open('test.txt', 'r')
source_mac = s.read().splitlines()

count = 0
countMac = 0
countFor = 0

def find_mac(sneaky):
    global count
    global countFor
    count = count + 1
    for switches in source_file:
        countFor = countFor + 1
        # print sneaky  # only goes through the loop once
        switch = switches[4]
        source_switch = open(switch + '.txt', 'r')
        switch_read = source_switch.readlines()
        for mac in switch_read:
            # print mac  # does search through all the switches
            found_mac = re.search(sneaky, mac)
            if found_mac is not None:
                interface = re.search("(Gi|Eth|Te)(\S+)", mac)
                if interface is not None:
                    port = interface.group()
                    macaddress.writerow([sneaky, switch, port])
                    print sneaky + ' ' + switch + ' ' + port
        source_switch.close()

for macs in source_mac:
    match = re.search(r'[a-fA-F0-9]{4}[.][a-fA-F0-9]{4}[.][a-fA-F0-9]{4}', macs)
    if match is not None:
        sneaky = match.group()
        find_mac(sneaky)
        countMac = countMac + 1

print count
print countMac
print countFor
I've added count, countFor and countMac to see how many times the loops and functions run. Here is the output:
549f.3507.7674 the name of the switch Eth100/1/11
677
677
353
Any insight would be appreciated.
source_file is opened globally only once, so the first time you call find_mac(), the for switches in source_file: loop exhausts the file. Since the file is never closed and reopened, on every subsequent find_mac() call the file pointer is already at the end of the file and nothing is read.
Moving the following to the beginning of find_mac should fix it:
f = open('all_switches.csv','U')
source_file = csv.reader(f)
Consider using with statements to ensure your files are closed as well.
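A rough sketch of find_mac along those lines, reopening all_switches.csv on each call inside with blocks so the files are also closed (macaddress is the global csv writer from the question):
import csv
import re

def find_mac(sneaky):
    # Reopen the switch list on every call so the csv reader starts
    # from the top instead of at the previous call's end-of-file.
    with open('all_switches.csv', 'U') as f:
        source_file = csv.reader(f)
        for switches in source_file:
            switch = switches[4]
            with open(switch + '.txt', 'r') as source_switch:
                for mac in source_switch:
                    if re.search(sneaky, mac) is not None:
                        interface = re.search(r"(Gi|Eth|Te)(\S+)", mac)
                        if interface is not None:
                            macaddress.writerow([sneaky, switch, interface.group()])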

Segmentation Fault

I am using Python 2.4.4 (old machine, can't do anything about it) on a UNIX machine. I am extremely new to Python/programming and have never used a UNIX machine before. This is what I am trying to do:
1. Extract a single sequence from a FASTA file (proteins + nucleotides) to a temporary text file.
2. Give this temporary file to a program called 'threader'.
3. Append the output from threader (called tempresult.out) to a file called results.out.
4. Remove the temporary file.
5. Remove the tempresult.out file.
6. Repeat using the next FASTA sequence.
Here is my code so far:
import os
from itertools import groupby

input_file = open('controls.txt', 'r')
output_file = open('results.out', 'a')

def fasta_parser(fasta_name):
    input = fasta_name
    parse = (x[1] for x in groupby(input, lambda line: line[0] == ">"))
    for header in parse:
        header = header.next()[0:].strip()
        seq = "\n".join(s.strip() for s in parse.next())
        yield (header, '\n', seq)

parsedfile = fasta_parser(input_file)
mylist = list(parsedfile)

index = 0
while index < len(mylist):
    temp_file = open('temp.txt', 'a+')
    temp_file.write(' '.join(mylist[index]))
    os.system('threader' + ' temp.txt' + ' tempresult.out' + ' structures.txt')
    os.remove('temp.txt')
    f = open('tempresult.out', 'r')
    data = str(f.read())
    output_file.write(data)
    os.remove('tempresult.out')
    index += 1

output_file.close()
temp_file.close()
input_file.close()
When I run this script I get a 'Segmentation fault' error. From what I gather, this happens when a program touches memory it shouldn't (???). I assume it is something to do with the temporary files, but I have no idea how I would get around this.
Any help would be much appreciated!
Thanks!
Update 1:
Threader works fine when I give it the same sequence multiple times, like this:
import os

input_file = open('control.txt', 'r')
output_file = open('results.out', 'a')

x = 0
while x < 3:
    os.system('threader' + ' control.txt' + ' tempresult.out' + ' structures.txt')
    f = open('tempresult.out', 'r')
    data = str(f.read())
    output_file.write(data)
    os.remove('result.out')
    x += 1

output_file.close()
input_file.close()
Update 2: In the event that someone else gets this error: I forgot to close temp.txt before invoking the threader program.
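For anyone hitting the same error, a rough sketch of the main loop with that fix applied: writing temp.txt inside a with block closes (and flushes) it before threader runs, and 'w' mode replaces the previous sequence rather than appending to it (mylist and output_file as in the question):
for entry in mylist:
    # The with block closes and flushes temp.txt before threader reads it.
    with open('temp.txt', 'w') as temp_file:
        temp_file.write(' '.join(entry))
    os.system('threader temp.txt tempresult.out structures.txt')
    with open('tempresult.out', 'r') as f:
        output_file.write(f.read())
    os.remove('temp.txt')
    os.remove('tempresult.out')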

Python 2.7 CSV writer issue

I have some Python code that lists pull requests on GitHub. If I print the parsed JSON output to the console, I get the expected results, but when I write the parsed JSON to a csv file I don't get the same results: they are cut off after the sixth result (and that varies).
What I'm trying to do is overwrite the csv each time with the latest output.
Also, I'm dealing with unicode output, which I use unicodecsv for. I don't know if this is throwing the csv output off.
I will list the relevant piece of code twice: once with the print statement and once with the csv code.
Thanks for any help.
import sys
import codecs
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
import csv
import unicodecsv

for pr in result:
    data = pr.as_dict()
    changes = (gh.repository('my-repo', repo).pull_request(data['number'])).as_dict()
    if changes['commits'] == 1 and changes['changed_files'] == 1:
        # keep print to console for testing purposes
        print "Login: " + changes['user']['login'] + '\n' + "Title: " + changes['title'] + '\n' + "Changed Files: " + str(changes['changed_files']) + '\n' + "Commits: " + str(changes['commits']) + '\n'
With csv:
import sys
import codecs
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
import csv
import unicodecsv

for pr in result:
    data = pr.as_dict()
    changes = (gh.repository('my-repo', repo).pull_request(data['number'])).as_dict()
    if changes['commits'] == 1 and changes['changed_files'] == 1:
        with open('c:\pull.csv', 'r+') as f:
            csv_writer = unicodecsv.writer(f, encoding='utf-8')
            csv_writer.writerow(['Login', 'Title', 'Changed files', 'Commits'])
            for i in changes['user']['login'], changes['title'], str(changes['changed_files']), str(changes['commits']):
                csv_writer.writerow([changes['user']['login'], changes['title'], changes['changed_files'], changes['commits']])
The problem is the way you write data to the file. Every time you open the file in r+ mode, writing starts at the beginning of the file, so each iteration overwrites the rows written before it. Open the file once, before the loop, and write the header only once.
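A rough sketch of that fix using the question's unicodecsv setup: open the file once in 'wb' (binary mode, as unicodecsv on 2.7 expects), write the header once, then one row per matching pull request:
with open('c:\\pull.csv', 'wb') as f:  # note the escaped backslash
    csv_writer = unicodecsv.writer(f, encoding='utf-8')
    csv_writer.writerow(['Login', 'Title', 'Changed files', 'Commits'])
    for pr in result:
        data = pr.as_dict()
        changes = (gh.repository('my-repo', repo).pull_request(data['number'])).as_dict()
        if changes['commits'] == 1 and changes['changed_files'] == 1:
            csv_writer.writerow([changes['user']['login'], changes['title'],
                                 changes['changed_files'], changes['commits']])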
