I am very new to Python. I have a script that runs on a particular file (input1.txt) and generates an output (output1.fasta), but I would like to run it for multiple files, for example input2.txt, input3.txt, ..., generating the respective outputs output2.fasta, output3.fasta, and so on.
from Bio import SeqIO

fasta_file = "sequences.txt"
wanted_file = "input1.txt"
result_file = "output1.fasta"

wanted = set()
with open(wanted_file) as f:
    for line in f:
        line = line.strip()
        if line != "":
            wanted.add(line)

fasta_sequences = SeqIO.parse(open(fasta_file), 'fasta')
with open(result_file, "w") as f:
    for seq in fasta_sequences:
        if seq.id in wanted:
            SeqIO.write([seq], f, "fasta")
I tried to add glob, but I do not know how to handle the output file name.
from Bio import SeqIO
import glob

fasta_file = "sequences.txt"
for filename in glob.glob('*.txt'):
    wanted = set()
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if line != "":
                wanted.add(line)
    fasta_sequences = SeqIO.parse(open(fasta_file), 'fasta')
    with open(result_file, "w") as f:
        for seq in fasta_sequences:
            if seq.id in wanted:
                SeqIO.write([seq], f, "fasta")
The error message is: NameError: name 'result_file' is not defined
Your glob is currently pulling in your "sequences" file as well as the inputs, because *.txt also matches sequences.txt. If the fasta file is always the same and you only want to iterate over the input files, then you need
for filename in glob.glob('input*.txt'):
Also, to repeat your entire process, you probably want to put it in a function. And since the output filename always corresponds to the input, you can create it dynamically.
from Bio import SeqIO
import glob

def create_fasta_outputs(fasta_file, wanted_file):
    result_file = wanted_file.replace("input", "output").replace(".txt", ".fasta")
    wanted = set()
    with open(wanted_file) as f:
        for line in f:
            line = line.strip()
            if line != "":
                wanted.add(line)
    fasta_sequences = SeqIO.parse(open(fasta_file), 'fasta')
    with open(result_file, "w") as f:
        for seq in fasta_sequences:
            if seq.id in wanted:
                SeqIO.write([seq], f, "fasta")

fasta_file = "sequences.txt"
for wanted_file in glob.glob('input*.txt'):
    create_fasta_outputs(fasta_file, wanted_file)
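If the input names don't always follow the input*/output* pattern, the output name can also be derived with pathlib from the standard library; a minimal sketch (the output_name helper is just for illustration):

from pathlib import Path

def output_name(wanted_file):
    # e.g. "input3.txt" -> "output3.fasta", kept in the same directory
    p = Path(wanted_file)
    return str(p.with_name(p.stem.replace("input", "output") + ".fasta"))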
I have the function below, and I am trying to store the contents of a text file in a temp file (removing unnecessary lines) with a special character appended.
But I also want the same content with a different special character next time, and I am not able to do that. The function below creates a temp file; to get the desired output I have to run the same function again and create the file every time, which is not a good way. Is there anything we can do without creating a temp/extra file, storing the contents in a return variable and appending whatever special character we want, multiple times?
import os
import re

def manifest():
    pathfile = "abc_12.txt"
    with open(pathfile, 'r') as firstfile, open('temp.txt', 'w') as s:
        for line in firstfile:
            if line.strip().startswith("-") or line.startswith("<"):
                print("ignore")
            elif re.search('\\S', line):
                name = str(os.path.basename(line))
                s.write("*" + name)

def contents():
    temppath = "temp.txt"
    with open(temppath, 'r') as f:
        lines = f.readlines()
    lines += lines
    return lines

manifest()
value = contents()
File abc_12.txt:
---ABC-123
nice/abc.py
xml/abc.py
<<NOP-123
bac\nice.py
abc.py
---CDEF-345
jkl.oy
abc.py
From the abc_12.txt file I want to get the contents back as a return value, something like this:
abc.py
abc.py
nice.py
abc.py
jkl.oy
abc.py
and manipulate them wherever I want, similar to the outputs below:
Output 1:
* abc.py
* abc.py
* nice.py
* abc.py
* jkl.oy
* abc.py
Output 2:
##abc.py
##abc.py
##nice.py
##abc.py
##jkl.oy
##abc.py
Maybe first you should read the file, find the names, and keep them in a list:
import os
import re

def read_data():
    results = []
    with open("abc_12.txt") as infile:
        for line in infile:
            if line.strip().startswith(("-", "<")):  # `startswith`/`endswith` can take a tuple
                print("ignore:", line)
            elif re.search('\\S', line):
                name = os.path.basename(line)
                results.append(name)
    return results
And later you can use this list to create a temp file or another file:
data = read_data()

with open('temp.txt', 'w') as outfile:
    for line in data:
        outfile.write(f'* {line}')
        #print(f'* {line}', end='')

with open('other.txt', 'w') as outfile:
    for line in data:
        outfile.write(f'##{line}')
        #print(f'##{line}', end='')
EDIT:
Minimal working code.
I used io.StringIO only to simulate a file in memory, so everyone can simply copy and test it.
import os
import re
import io

text = r'''---ABC-123
nice/abc.py
xml/abc.py
<<NOP-123
bac\nice.py
abc.py
---CDEF-345
jkl.oy
abc.py
'''

def read_data():
    results = []
    with io.StringIO(text) as infile:
    #with open("abc_12.txt") as infile:
        for line in infile:
            line = line.strip()
            if line:
                if line.startswith(("-", "<")):  # `startswith`/`endswith` can take a tuple
                    print("ignore:", line)
                else:
                    name = os.path.basename(line)
                    results.append(name)
    return results

data = read_data()

with open('temp.txt', 'w') as outfile:
    for line in data:
        outfile.write(f'* {line}\n')
        print(f'* {line}')

with open('other.txt', 'w') as outfile:
    for line in data:
        outfile.write(f'##{line}\n')
        print(f'##{line}')
EDIT:
If you don't want to save to a file, then you still need a for-loop to create the string
data = read_data()

string_1 = ''
for line in data:
    string_1 += f'* {line}\n'

string_2 = ''
for line in data:
    string_2 += f'##{line}\n'
or to create new lists (and eventually strings)
data = read_data()

list_1 = []
for line in data:
    list_1.append(f'* {line}')

list_2 = []
for line in data:
    list_2.append(f'##{line}')

string_1 = "\n".join(list_1)
string_2 = "\n".join(list_2)
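If you end up decorating the same data with several prefixes, the loop can also be folded into one small helper; a sketch (the decorate name is just for illustration):

def decorate(data, prefix):
    # build the decorated string in one pass
    return ''.join(f'{prefix}{line}\n' for line in data)

string_1 = decorate(data, '* ')
string_2 = decorate(data, '##')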
I'm reading in text files from the command line and I'm trying to produce output as follows...
[screenshot: desired output given these command-line arguments]
Essentially, I want to read in files from the command line; take the first line from each file & print them on one line separated by a tab. Take the second line from each file & print them on the next line separated by a tab & so on.
This is the best code I've come up with (I'm a beginner and I've been looking at other responses for far too long; glob & os haven't been helping me understand how to do this; I'd just like to use basic loops and file opening to do this):
import sys

l = []
list_files = sys.argv[:1]
for fname in list_files:
    open(fname) as infile:
        for line in infile:
            line = line.strip()
            if line == '':
                l.append("''")
            else:
                l.append(line)
print(l)  # List of all appended animals. Not in the right order
          # (takes all names from one file, then all the names from the
          # next instead of taking one line from every file on each iteration)
This is a minimally changed version that should work.
import sys
from itertools import zip_longest

files = []
list_files = sys.argv[1:]  # skip the script name itself
for fname in list_files:
    with open(fname) as infile:  # Don't forget the `with`!
        l = []
        for line in infile:
            line = line.strip()
            if line == '':
                l.append("''")
            else:
                l.append(line)
        files.append(l)  # list of lists

for lines in zip_longest(*files, fillvalue=''):  # transpose list of lists
    print(*lines, sep='\t')  # separate with tabs
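For example, zip_longest pads the shorter lists with the fillvalue, so files of different lengths still line up; a tiny standalone demo:

from itertools import zip_longest

files = [['cat', 'dog'], ['fish']]
for lines in zip_longest(*files, fillvalue=''):
    print(*lines, sep='\t')
# prints:
# cat   fish
# dog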
The best way to open files in Python is with `with`. More information can be found at https://www.pythonforbeginners.com/files/with-statement-in-python. Anyway:
import sys

if len(sys.argv) != 3:
    sys.exit(1)

filename1 = sys.argv[1]
filename2 = sys.argv[2]
with open(filename1, 'r') as file1, open(filename2, 'r') as file2:
    for line1, line2 in zip(file1, file2):
        print(line1.strip(), line2.strip(), sep='\t')
This can be changed to allow for more than two files:
import sys

if len(sys.argv) < 3:  # allow two or more files
    sys.exit(1)

filenames = sys.argv[1:]
all_lines = []
for filename in filenames:
    with open(filename, 'r') as file:
        all_lines.append([l.strip() for l in file.readlines()])

for line in zip(*all_lines):
    print(*line, sep='\t')
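Note that zip stops at the shortest file, so trailing lines of longer files are dropped; if the files can differ in length, itertools.zip_longest keeps them (a small variation on the loop above):

from itertools import zip_longest

for line in zip_longest(*all_lines, fillvalue=''):
    print(*line, sep='\t')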
I want to change this code to read specifically from line 1400001 to line 1450000. What is the modification?
The file is composed of a single object type, one JSON object per line.
I also want to save the output to a .csv file. What should I do?
import json

revu = []
with open("review.json", 'r', encoding="utf8") as f:
    for line in f:
        revu = json.loads(line[1400001:1450000])
If it is JSON per line:
import json

revu = []
with open("review.json", 'r', encoding="utf8") as f:
    # expensive statement; depending on your file size this might
    # make you run out of memory
    revu = [json.loads(s) for s in f.readlines()[1400001:1450000]]
If you do it on the /etc/passwd file it is easy to test (no JSON of course, so that is left out):
revu = []
with open("/etc/passwd", 'r') as f:
    # expensive statement
    revu = [s for s in f.readlines()[5:10]]

print(revu)  # gives entries 5 to 10
Or you iterate over all lines, saving you from memory issues:
import json

revu = []
with open("...", 'r') as f:
    for i, line in enumerate(f):
        if i >= 1400001 and i <= 1450000:
            revu.append(json.loads(line))

# process revu
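itertools.islice expresses the same line range without the manual counter; a sketch under the same assumptions (0-based line numbers, one JSON object per line):

import json
from itertools import islice

revu = []
with open("review.json", 'r', encoding="utf8") as f:
    # islice stops after line index 1450000, so the rest of the file is never read
    for line in islice(f, 1400001, 1450001):
        revu.append(json.loads(line))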
To CSV ...
import pandas as pd
import json

def mylines(filename, _from, _to):
    with open(filename, encoding="utf8") as f:
        for i, line in enumerate(f):
            if i >= _from and i <= _to:
                yield json.loads(line)

df = pd.DataFrame([r for r in mylines("review.json", 1400001, 1450000)])
df.to_csv("/tmp/whatever.csv")
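If pandas is not available, the standard csv module works too; a sketch assuming every JSON object has the same keys (otherwise collect the union of keys first):

import csv

rows = list(mylines("review.json", 1400001, 1450000))
with open("/tmp/whatever.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=rows[0].keys())
    writer.writeheader()
    writer.writerows(rows)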
I have many text files, and each of them has an empty line at the end. My script does not seem to remove it. Can anyone help please?
# python 2.7
import os
import sys
import re

filedir = 'F:/WF/'
dir = os.listdir(filedir)
for filename in dir:
    if 'ABC' in filename:
        filepath = os.path.join(filedir, filename)
        all_file = open(filepath, 'r')
        lines = all_file.readlines()
        output = 'F:/WF/new/' + filename
        # Read in each row and parse out components
        for line in lines:
            # Weed out blank lines
            line = filter(lambda x: not x.isspace(), lines)
            # Write to the new directory
            f = open(output, 'w')
            f.writelines(line)
            f.close()
You can use Python's rstrip() function to do this as follows:
filename = "test.txt"

with open(filename) as f_input:
    data = f_input.read().rstrip('\n')

with open(filename, 'w') as f_output:
    f_output.write(data)
This will remove all empty lines from the end of the file. It will not change the file if there are no empty lines.
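If the file should still end with a single newline (many tools expect one), write it back explicitly; a small variation on the snippet above:

with open(filename, 'w') as f_output:
    f_output.write(data + '\n')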
You can remove the last empty line by using:
with open(filepath, 'r') as f:
    data = f.read()

with open(output, 'w') as w:
    w.write(data[:-1])
Note that this drops only the final character, so it assumes the file ends with exactly one newline.
You can try this without using the re module:
import os

filedir = 'F:/WF/'
dir = os.listdir(filedir)
for filename in dir:
    if 'ABC' in filename:
        filepath = os.path.join(filedir, filename)
        f = open(filepath).readlines()
        new_file = open(filepath, 'w')
        new_file.write('')
        for i in f[:-1]:
            new_file.write(i)
        new_file.close()
For each filepath, the code reads the file's contents into the list f, then reopens the file for writing and writes every element of f back except the last one, which is the empty line.
You can remove the last blank line by the following command. This worked for me:
with open(file_path_src, 'r') as file:
    lines = file.read().splitlines()  # list of lines without trailing newlines

with open(file_path_dst, 'w') as f:
    for indx, line in enumerate(lines):
        f.write(line)
        if indx != len(lines) - 1:
            f.write('\n')
I think this should work fine, using writelines since f is a list of lines:
new_file.writelines(f[:-1])
I would like to filter out the sequences that I don't want, based on a given file A.fasta. The original file contains all the sequences; a fasta file starts with a sequence ID followed by its nucleotides, represented by A, T, C, G. Can anyone help me?
A.fasta
>chr12:15747942-15747949
TGACATCA
>chr2:130918058-130918065
TGACCTCA
Original.fasta
>chr3:99679938-99679945
TGACGTAA
>chr9:135822160-135822167
TGACCTCA
>chr12:15747942-15747949
TGACATCA
>chr2:130918058-130918065
TGACCTCA
>chr2:38430457-38430464
TGACCTCA
>chr1:112381724-112381731
TGACATCA
Expected output for C.fasta
>chr3:99679938-99679945
TGACGTAA
>chr9:135822160-135822167
TGACCTCA
>chr2:38430457-38430464
TGACCTCA
>chr1:112381724-112381731
TGACATCA
Code
import sys
import warnings
from Bio import SeqIO
from Bio import BiopythonDeprecationWarning
warnings.simplefilter('ignore', BiopythonDeprecationWarning)

fasta_file = sys.argv[1]   # Input fasta file
remove_file = sys.argv[2]  # Input wanted file, one gene name per line
result_file = sys.argv[3]  # Output fasta file

remove = set()
with open(remove_file) as f:
    for line in f:
        line = line.strip()
        if line != "":
            remove.add(line)

fasta_sequences = SeqIO.parse(open(fasta_file), 'fasta')
with open(result_file, "w") as f:
    for seq in fasta_sequences:
        nuc = seq.seq.tostring()
        if nuc not in remove and len(nuc) > 0:
            SeqIO.write([seq], f, "fasta")
The code above filters out every copy of a repeated sequence, but I would like to keep repeated sequences that are still supposed to appear in the output.
Take a look at Biopython. Here is a solution using it:
from Bio import SeqIO

input_file = 'a.fasta'
merge_file = 'original.fasta'
output_file = 'results.fasta'

exclude = set()
fasta_sequences = SeqIO.parse(open(input_file), 'fasta')
for fasta in fasta_sequences:
    exclude.add(fasta.id)

fasta_sequences = SeqIO.parse(open(merge_file), 'fasta')
with open(output_file, 'w') as output_handle:
    for fasta in fasta_sequences:
        if fasta.id not in exclude:
            SeqIO.write([fasta], output_handle, "fasta")
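Excluding by record id rather than by nucleotide string is what keeps the repeated TGACCTCA entries in the output. If you prefer the command-line style of the original script, the same logic works with sys.argv; a sketch (the file names are passed as arguments):

import sys
from Bio import SeqIO

input_file, merge_file, output_file = sys.argv[1], sys.argv[2], sys.argv[3]

# ids to exclude, taken from the first fasta file
exclude = {rec.id for rec in SeqIO.parse(input_file, 'fasta')}

with open(output_file, 'w') as output_handle:
    for rec in SeqIO.parse(merge_file, 'fasta'):
        if rec.id not in exclude:
            SeqIO.write([rec], output_handle, 'fasta')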