My program recursively processes a string to reverse it. I would like to have it pull data directly from the website instead of a text file as it currently does, but I can't get it to pull the data from the website.
import urllib.request

def reverse(alist):
    #print(alist)
    if alist == []:
        return []
    else:
        return reverse(alist[1:]) + [alist[0]]

def main():
    #file1 = urllib.request.urlopen('http://devel.cs.stolaf.edu/parallel/data/cathat.txt').read()
    file1 = open('cat.txt','r')
    for line in file1:
        stulist = line.split()
        x = reverse(stulist)
        print(' '.join(x))
    file1.close()

main()
The commented-out lines are to show what I have tried.
You can use the URL just like a file:
import urllib
...
f = urllib.urlopen(url)
for line in f:
    ...
f.close()
What you did was call read() on the opened URL, so all of the content was read into the file1 variable and file1 became one big string of data instead of a file-like object you can loop over.
For Python 3:
import urllib.request
...
f = urllib.request.urlopen(url)
for line in f:
    ...
f.close()
Also, each line comes back as bytes, so you need to decode it with the correct encoding. If the encoding is UTF-8, you can do the following:
for line in f:
    line = line.decode("utf-8")
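Putting the pieces together with the original program, a minimal Python 3 sketch (assuming the file at that URL is served as UTF-8 text) could look like this:

import urllib.request

def reverse(alist):
    if alist == []:
        return []
    else:
        return reverse(alist[1:]) + [alist[0]]

def main():
    url = 'http://devel.cs.stolaf.edu/parallel/data/cathat.txt'
    # iterate over the response line by line, just like a file object
    with urllib.request.urlopen(url) as file1:
        for line in file1:
            line = line.decode('utf-8')  # decode bytes, assuming UTF-8
            stulist = line.split()
            x = reverse(stulist)
            print(' '.join(x))

main()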
import urllib2

def reverse(alist):
    if alist == []:
        return []
    else:
        return reverse(alist[1:]) + [alist[0]]

def main():
    lines = [line.strip() for line in urllib2.urlopen('http://devel.cs.stolaf.edu/parallel/data/cathat.txt')]
    print lines
    print lines[::-1]

main()
Output
['The cat in the party hat', 'wore the hat', 'to the cat hat party.']
['to the cat hat party.', 'wore the hat', 'The cat in the party hat']
I have the function below, and I am trying to store the contents of a text file in another temp file (removing unnecessary lines) while prepending a special character.
But I also want the same content that is in the temp text file with a different special character the next time, and I am not able to do that. The function below creates a temp file; to get the desired output I have to call the same function and create the file again every time, which is not a good approach. Is there anything we can do without creating a temp/extra file, so that the contents are stored in a return variable and we can prepend whatever special character we want, as many times as we want?
import os
import re

def manifest():
    pathfile = "abc_12.txt"
    with open(pathfile, 'r') as firstfile, open('temp.txt', 'w') as s:
        for line in firstfile:
            if line.strip().startswith("-") or line.startswith("<"):
                print("ignore")
            elif re.search('\\S', line):
                name = str(os.path.basename(line))
                s.write("*" + name)

def contents():
    temppath = "temp.txt"
    with open(temppath, 'r') as f:
        lines = f.readlines()
    lines += lines
    return lines

manifest()
value = contents()
File abc_12.txt:
---ABC-123
nice/abc.py
xml/abc.py
<<NOP-123
bac\nice.py
abc.py
---CDEF-345
jkl.oy
abc.py
I want to be able to get the contents of the abc_12.txt file back as a return value, something like this:
abc.py
abc.py
nice.py
abc.py
jkl.oy
abc.py
and then manipulate them wherever I want, similar to the outputs below:
Output 1:
* abc.py
* abc.py
* nice.py
* abc.py
* jkl.oy
* abc.py
Output 2:
##abc.py
##abc.py
##nice.py
##abc.py
##jkl.oy
##abc.py
Maybe first you should read the file, extract the names, and keep them in a list:
import os
import re

def read_data():
    results = []
    with open("abc_12.txt") as infile:
        for line in infile:
            if line.strip().startswith(("-", "<")):  # `startswith`/`endswith` can take a tuple
                print("ignore:", line)
            elif re.search('\\S', line):
                name = os.path.basename(line)
                results.append(name)
    return results
And later you can use this list to create the temp file or any other file:
data = read_data()

with open('temp.txt', 'w') as outfile:
    for line in data:
        outfile.write(f'* {line}')
        #print(f'* {line}', end='')

with open('other.txt', 'w') as outfile:
    for line in data:
        outfile.write(f'##{line}')
        #print(f'##{line}', end='')
EDIT:
Minimal working code.
I used io.StringIO only to simulate a file in memory, so everyone can simply copy and test it.
import os
import re
import io

text = r'''---ABC-123
nice/abc.py
xml/abc.py
<<NOP-123
bac\nice.py
abc.py
---CDEF-345
jkl.oy
abc.py
'''

def read_data():
    results = []
    with io.StringIO(text) as infile:
    #with open("abc_12.txt") as infile:
        for line in infile:
            line = line.strip()
            if line:
                if line.startswith(("-", "<")):  # `startswith`/`endswith` can take a tuple
                    print("ignore:", line)
                else:
                    name = os.path.basename(line)
                    results.append(name)
    return results

data = read_data()

with open('temp.txt', 'w') as outfile:
    for line in data:
        outfile.write(f'* {line}\n')
        print(f'* {line}')

with open('other.txt', 'w') as outfile:
    for line in data:
        outfile.write(f'##{line}\n')
        print(f'##{line}')
EDIT:
If you don't want to save to a file, you still need a for loop to build the string:
data = read_data()

string_1 = ''
for line in data:
    string_1 += f'* {line}\n'

string_2 = ''
for line in data:
    string_2 += f'##{line}\n'
or to create a new list (and eventually a string):
data = read_data()

list_1 = []
for line in data:
    list_1.append(f'* {line}')

list_2 = []
for line in data:
    list_2.append(f'##{line}')

string_1 = "\n".join(list_1)
string_2 = "\n".join(list_2)
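The same strings can also be built without an explicit loop, using a generator expression with str.join(); a minimal sketch assuming the read_data() above:

data = read_data()

# join the prefixed names directly into single strings
string_1 = ''.join(f'* {name}\n' for name in data)
string_2 = ''.join(f'##{name}\n' for name in data)

print(string_1)
print(string_2)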
Below is my code for editing a text file.
Since Python can't edit a line and save the file at the same time,
I save the previous content of the text file into a list first and then write it out.
For example, suppose there are two text files called sample1.txt and sample2.txt in the same folder.
Sample1.txt
A for apple.
Second line.
Third line.
Sample2.txt
First line.
An apple a day.
Third line.
Execute the Python script:
import glob
import os

#search all text files which are in the same folder as the python script
path = os.path.dirname(os.path.abspath(__file__))
txtlist = glob.glob(path + '\*.txt')

for file in txtlist:
    fp1 = open(file, 'r+')
    strings = [] #create a list to store the content
    for line in fp1:
        if 'apple' in line:
            strings.append('banana\n') #change the content and store it in the list
        else:
            strings.append(line) #store the lines that were not changed
    fp2 = open(file, 'w+') # rewrite the original text file
    for line in strings:
        fp2.write(line)
    fp1.close()
    fp2.close()
Sample1.txt
banana
Second line.
Third line.
Sample2.txt
First line.
banana
Third line.
That's how I edit a specific line in a text file.
My question is: is there any other method that can do the same thing?
For example, using other functions or another data type rather than a list.
Thank you, everyone.
Simplify it to this:
with open(fname) as f:
    content = f.readlines()

content = ['banana' if line.find('apple') != -1 else line for line in content]
and then write the value of content back to the file.
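For completeness, a minimal sketch of that last step, assuming the same fname (note that the comprehension above inserts 'banana' without a trailing newline, so 'banana\n' may be preferable):

# write the modified lines back to the same file
with open(fname, 'w') as f:
    f.writelines(content)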
Instead of putting all the lines in a list and writing them out, you can read the whole file into memory, do the replacement, and write it back to the same file.
def replace_word(filename):
    with open(filename, 'r') as file:
        data = file.read()

    data = data.replace('word1', 'word2')

    with open(filename, 'w') as file:
        file.write(data)
Then you can loop through all of your files and apply this function:
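For example, a short sketch of that loop, assuming the replace_word() function above and .txt files sitting in the current directory:

import glob

# apply the replacement to every .txt file in the current directory
for path in glob.glob('*.txt'):
    replace_word(path)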
The built-in fileinput module makes this quite simple:
import fileinput
import glob

with fileinput.input(files=glob.glob('*.txt'), inplace=True) as files:
    for line in files:
        if 'apple' in line:
            print('banana')
        else:
            print(line, end='')
fileinput redirects print into the active file.
import glob
import os

def replace_line(file_path, replace_table: dict) -> None:
    list_lines = []
    need_rewrite = False
    with open(file_path, 'r') as f:
        for line in f:
            flag_rewrite = False
            for key, new_val in replace_table.items():
                if key in line:
                    list_lines.append(new_val + '\n')
                    flag_rewrite = True
                    need_rewrite = True
                    break  # only replace on the first matching key
            if not flag_rewrite:
                list_lines.append(line)
    if not need_rewrite:
        return
    with open(file_path, 'w') as f:
        [f.write(line) for line in list_lines]

if __name__ == '__main__':
    work_dir = os.path.dirname(os.path.abspath(__file__))
    txt_list = glob.glob(work_dir + '/*.txt')
    replace_dict = dict(apple='banana', orange='grape')
    for txt_path in txt_list:
        replace_line(txt_path, replace_dict)
I have 2 numbers in two similar files. There is a new.txt and original.txt. They both have the same string in them except for a number. The new.txt has a string that says boothNumber="3". The original.txt has a string that says boothNumber="1".
I want to be able to read the new.txt, pick the number 3 out of it and replace the number 1 in original.txt.
Any suggestions? Here is what I am trying.
import re  # used to replace string
import sys  # some of these are use for other code in my program

def readconfig():
    with open("new.text") as f:
        with open("original.txt", "w") as f1:
            for line in f:
                match = re.search(r'(?<=boothNumber=")\d+', line)
            for line in f1:
                pattern = re.search(r'(?<=boothNumber=")\d+', line)
                if re.search(pattern, line):
                    sys.stdout.write(re.sub(pattern, match, line))
When I run this, my original.txt gets completely cleared of any text.
I did a traceback and I get this:
in readconfig
for line in f1:
io.UnsupportedOperation: not readable
UPDATE
I tried:
def readconfig(original_txt_path="original.txt",
               new_txt_path="new.txt"):
    with open(new_txt_path) as f:
        for line in f:
            if not ('boothNumber=') in line:
                continue
            booth_number = int(line.replace('boothNumber=', ''))
            # do we need check if there are more than one 'boothNumber=...' line?
            break
    with open(original_txt_path) as f1:
        modified_lines = [line.startswith('boothNumber=') if not line
                          else 'boothNumber={}'.format(booth_number)
                          for line in f1]
    with open(original_txt_path, mode='w') as f1:
        f1.writelines(modified_lines)
And I get error:
booth_number = int(line.replace('boothNumber=', ''))
ValueError: invalid literal for int() with base 10: '
(workstationID="1" "1" window=1= area="" extra parts of the line here)\n
the "1" after workstationID="1" is where the boothNumber=" " would normally go. When I open up original.txt, I see that it actually did not change anything.
UPDATE 3
Here is my code in full. Note, the file names are changed but I'm still trying to do the same thing. This is another idea or revision I had that is still not working:
import os
import shutil
import fileinput
import re  # used to replace string
import sys  # prevents extra lines being inputed in config
            # example: sys.stdout.write

def convertconfig(pattern):
    source = "template.config"
    with fileinput.FileInput(source, inplace=True, backup='.bak') as file:
        for line in file:
            match = r'(?<=boothNumber=")\d+'
            sys.stdout.write(re.sub(match, pattern, line))

def readconfig():
    source = "bingo.config"
    pattern = r'(?<=boothNumber=")\d+'  # !!!!!!!!!! This probably needs fixed
    with fileinput.FileInput(source, inplace=True, backup='.bak') as file:
        for line in file:
            if re.search(pattern, line):
                fileinput.close()
                convertconfig(pattern)

def copyfrom(servername):
    source = r'//' + servername + '/c$/remotedirectory'
    dest = r"C:/myprogram"
    file = "bingo.config"
    try:
        shutil.copyfile(os.path.join(source, file), os.path.join(dest, file))
    except:
        print("Error")
    readconfig()

# begin here
os.system('cls' if os.name == 'nt' else 'clear')

array = []

with open("serverlist.txt", "r") as f:
    for servername in f:
        copyfrom(servername.strip())
bingo.config is my new file
template.config is my original
It's replacing the number in template.config with the literal string "r'(?<=boothNumber=")\d+'"
So template.config ends up looking like
boothNumber="r'(?<=boothNumber=")\d+'"
instead of
boothNumber="2"
To find the boothNumber value we can use the following regular expression (checked with regex101):
(?<=\sboothNumber=\")(\d+)(?=\")
Something like this should work:
import re
import sys  # some of these are use for other code in my program

BOOTH_NUMBER_RE = re.compile('(?<=\sboothNumber=\")(\d+)(?=\")')
search_booth_number = BOOTH_NUMBER_RE.search
replace_booth_number = BOOTH_NUMBER_RE.sub

def readconfig(original_txt_path="original.txt",
               new_txt_path="new.txt"):
    with open(new_txt_path) as f:
        for line in f:
            search_res = search_booth_number(line)
            if search_res is None:
                continue
            booth_number = int(search_res.group(0))
            # do we need check if there are more than one 'boothNumber=...' line?
            break
        else:
            # no 'boothNumber=...' line was found, so next lines will fail,
            # maybe we should raise exception like
            # raise Exception('no line starting with "boothNumber" was found')
            # or assign some default value
            # booth_number = -1
            # or just return?
            return

    with open(original_txt_path) as f:
        modified_lines = []
        for line in f:
            search_res = search_booth_number(line)
            if search_res is not None:
                line = replace_booth_number(str(booth_number), line)
            modified_lines.append(line)

    with open(original_txt_path, mode='w') as f:
        f.writelines(modified_lines)
Test
# Preparation
with open('new.txt', mode='w') as f:
    f.write('some\n')
    f.write('<jack Fill workstationID="1" boothNumber="56565" window="17" Code="" area="" section="" location="" touchScreen="False" secureWorkstation="false">')

with open('original.txt', mode='w') as f:
    f.write('other\n')
    f.write('<jack Fill workstationID="1" boothNumber="23" window="17" Code="" area="" section="" location="" touchScreen="False" secureWorkstation="false">')

# Invocation
readconfig()

# Checking output
with open('original.txt') as f:
    for line in f:
        # stripping newline character
        print(line.rstrip('\n'))
gives
other
<jack Fill workstationID="1" boothNumber="56565" window="17" Code="" area="" section="" location="" touchScreen="False" secureWorkstation="false">
Just a quick question: I am trying to create a simple spider that will access a site's sitemap.xml and save the URLs to a text file. I have the code below, but it only saves one of the URLs.
It seems to print all of the information I need in the CMD window, but not in the txt file.
import urllib2 as ur
import re

f = ur.urlopen(u'http://www.site.co.uk/sitemap.xml')
res = f.readlines()
for d in res:
    data = re.findall('<loc>(http:\/\/.+)<\/loc>',d)
    for i in data:
        print i
        file = open("sitemapdata.txt", "w")
        file.write(i)
        file.close()
Thanks in advance.
As soon as I posted this, I realised what went wrong. I accidentally left out + '\n', and I needed to change to file = open("sitemapdata.txt", "a"), since reopening the file with "w" inside the loop truncated it on every pass.
import urllib2 as ur
import re

f = ur.urlopen(u'http://www.site.co.uk/sitemap.xml')
res = f.readlines()
for d in res:
    data = re.findall('<loc>(http:\/\/.+)<\/loc>',d)
    for i in data:
        print i
        file = open("sitemapdata.txt", "a")
        file.write(i + '\n')
        file.close()
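A slightly cleaner variant is to open the output file once, outside the loops, so each URL is written as it is found. A sketch in Python 3 (where urllib2 became urllib.request), still assuming the same sitemap URL and regex:

import re
import urllib.request

f = urllib.request.urlopen('http://www.site.co.uk/sitemap.xml')

# open the output file once; every write goes to the same handle
with open("sitemapdata.txt", "w") as out:
    for d in f.readlines():
        d = d.decode('utf-8')  # response lines are bytes; assuming UTF-8
        for i in re.findall(r'<loc>(http:\/\/.+)<\/loc>', d):
            print(i)
            out.write(i + '\n')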
I have a script to clean URLs down to their base domains, e.g. from example.com/example1 and example.com/example2 down to example.com. My issue is that when it goes through the file of URLs, it ends up with duplicate base domains. I want to remove the duplicates while printing the URLs to a file. Below is the code I currently have.
from Tkinter import *
import tkFileDialog
import re

def main():
    fileOpen = Tk()
    fileOpen.withdraw() #hiding tkinter window

    file_path = tkFileDialog.askopenfilename(
        title="Open file", filetypes=[("txt file", ".txt")])

    if file_path != "":
        print "you chose file with path:", file_path
    else:
        print "you didn't open anything!"

    fin = open(file_path)
    fout = open("URL Cleaned.txt", "wt")

    for line in fin.readlines():
        editor = (line.replace('[.]', '.')
                  .replace('[dot]', '.')
                  .replace('hxxp://www.', '')
                  .replace('hxxps://www.', '')
                  .replace('hxxps://', '')
                  .replace('hxxp://', '')
                  .replace('www.', '')
                  .replace('http://www.', '')
                  .replace('https://www.', '')
                  .replace('https://', '')
                  .replace('http://', ''))
        editor = re.sub(r'/.*', '', editor)

if __name__ == '__main__':
    main()
Any help is appreciated. I have scoured the posts and tried all of the suggestions for my issue and have not found one that works.
You can use regular expressions to find the base domains.
If you have one URL per line in your file:
import re

def main():
    file = open("url.txt", 'r')
    domains = set()

    # works for any address like https://www.domain.com/something/somethingmore...,
    # also without www, without https, or just www.domain.org
    matcher = re.compile("(h..ps?://)?(?P<domain>(www\.)?[^/]*)/?.*")

    for line in file:
        # make here any replace you need for obfuscated urls, e.g. line = line.replace('[.]', '.')
        if line[-1] == '\n':  # remove "\n" from end of line if present
            line = line[0:-1]
        match = matcher.search(line)
        if match != None:  # If a url has been found
            domains.add(match.group('domain'))

    print domains
    file.close()

main()
For example, with this file, it will print:
set(['platinum-shakers.net', 'wmi.ns01.us', 'adservice.no-ip.org', 'samczeruno.pl', 'java.ns1.name', 'microsoft.dhcp.biz', 'ids.us01.us', 'devsite.quostar.com', 'orlandmart.com'])
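Since the goal was to write the de-duplicated domains to a file rather than just print them, you could finish with something like this at the end of main(), assuming the domains set built above and the output filename from the question:

# write each unique base domain on its own line
with open("URL Cleaned.txt", "wt") as fout:
    for domain in sorted(domains):
        fout.write(domain + '\n')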
Perhaps you could use a regular expression:
import re

p = re.compile(r".*\.com/(.*)")  # to get for instance 'example1' or 'example2' etc.

with open(file_path) as fin, open("URL Cleaned.txt", "wt") as fout:
    lines = fin.readlines()
    bases = set(re.search(p, line).groups()[0] for line in lines if len(line) > 1)
    for b in bases:
        fout.write(b)
Using with open(...) automatically closes the files after the block of code is executed.
Output:
Using a text file with:
www.example.com/example1
www.example.com/example2
# blank lines are accounted for
www.example.com/example3
www.example.com/example4
www.example.com/example4 # as are duplicates
as the lines, I got the output,
example1
example2
example3
example4