Python counting occurrences across multiple lines using loops

I want a quick pythonic method to give me a count in a loop. I am actually too embarrassed to post up my solutions which are currently not working.
Given a sample from a text file structured as follows:
script7
BLANK INTERRUPTION
script2
launch4.VBS
script3
script8
launch3.VBS
script5
launch1.VBS
script6
I want a count of all the times a script[y] line is followed by a launch[X] line. Launch has a range of values from 1-5, whilst script has a range of 1-15.
Using script3 as an example, I would need a count for each of the following in a given file:
script3
launch1
#count this
script3
launch2
#count this
script3
launch3
#count this
script3
launch4
#count this
script3
launch4
#count this
script3
launch5
#count this
I think the sheer number of loops involved here has surpassed my knowledge of Python. Any assistance would be greatly appreciated.

Why not use a multi-line regex - then the script becomes:
import re
# read all the text of the file, and clean it up
with open('counts.txt', 'rt') as f:
    alltext = '\n'.join(line.strip() for line in f)
# find all occurrences of a script line followed by a launch line;
# (?mi) at the start makes ^/$ match at each line and the match case-insensitive
cont = re.findall(r'(?mi)^script(\d+)\nlaunch(\d+)\.VBS$', alltext)
# accumulate the counts of each launch number for each script number
# into nested dictionaries
scriptcounts = {}
for scriptnum, launchnum in cont:
    # if we haven't seen this script number before, create the dictionary for it
    if scriptnum not in scriptcounts:
        scriptcounts[scriptnum] = {}
    # if we haven't seen this launch number with this script number before,
    # initialize the count to 0
    if launchnum not in scriptcounts[scriptnum]:
        scriptcounts[scriptnum][launchnum] = 0
    # increment the count for this combination of script and launch number
    scriptcounts[scriptnum][launchnum] += 1
# produce the output in order of increasing scriptnum/launchnum
for scriptnum in sorted(scriptcounts.keys()):
    for launchnum in sorted(scriptcounts[scriptnum].keys()):
        print "script%s\nlaunch%s.VBS\n# count %d\n" % (scriptnum, launchnum, scriptcounts[scriptnum][launchnum])
The output (in the format you requested) is, for example:
script2
launch1.VBS
# count 1
script2
launch4.VBS
# count 1
script5
launch1.VBS
# count 1
script8
launch3.VBS
# count 3
re.findall() returns a list of all the matches; each match is a tuple of the () groups in the pattern. The (?mi) at the start of the pattern is a directive telling the regular expression matcher to let ^ and $ match at each line boundary (m) and to match case-insensitively (i). As the pattern stands, the fragment script(\d+) pulls out only the digits following 'script' into the match; it could just as easily include 'script' by being (script\d+), and similarly (launch\d+\.VBS), and then only the printing would need modification to handle this variation.
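For example, a variant capturing the whole tokens could look like this (an untested sketch of the same approach):
import re

with open('counts.txt', 'rt') as f:
    alltext = '\n'.join(line.strip() for line in f)
# capture the whole 'scriptN' and 'launchN.VBS' tokens instead of only the digits
pairs = re.findall(r'(?mi)^(script\d+)\n(launch\d+\.VBS)$', alltext)
counts = {}
for script, launch in pairs:
    counts[(script, launch)] = counts.get((script, launch), 0) + 1
for (script, launch), n in sorted(counts.items()):
    print "%s\n%s\n# count %d\n" % (script, launch, n)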
HTH
barny

Here is my solution using defaultdict with Counters and regex with lookahead.
import re
from collections import Counter, defaultdict

with open('in.txt', 'r') as f:
    # make sure we have only \n as line end and no leading or trailing whitespace;
    # this makes the regex less complex
    alltext = '\n'.join(line.strip() for line in f)

# find the keyword script\d+ and capture it, then lazily expand and capture everything,
# with a lookahead so that we stop as soon as, and only if, the next word is 'script' or
# the end of the string
scriptPattern = re.compile(r'(script\d+)(.*?)(?=script|\n?$)', re.DOTALL)
# just find everything that matches launch\d+
launchPattern = re.compile(r'launch\d+')
# create a defaultdict with a Counter for every entry
scriptDict = defaultdict(Counter)
# go through all matches
for match in scriptPattern.finditer(alltext):
    script, body = match.groups()
    # update the counter of this script
    scriptDict[script].update(launchPattern.findall(body))
# print the results
for script in sorted(scriptDict):
    counter = scriptDict[script]
    if len(counter):
        print('{} launches:'.format(script))
        for launch in sorted(counter):
            count = counter[launch]
            print('\t{} {} time(s)'.format(launch, count))
    else:
        print('{} launches nothing'.format(script))
Running this against the sample string from the question, I get the following result:
script2 launches:
launch4 1 time(s)
script3 launches nothing
script5 launches:
launch1 1 time(s)
script6 launches nothing
script7 launches nothing
script8 launches:
launch3 1 time(s)

Here's an approach which uses nested dictionaries. Please tell me if you would like the output to be in a different format:
#!/usr/bin/env python3
import re

script_dict = {}
with open('infile.txt', 'r') as infile:
    scriptre = re.compile(r"^script\d+$")
    for line in infile:
        line = line.rstrip()
        if scriptre.match(line) is not None:
            script_dict[line] = {}
    infile.seek(0)  # go to beginning
    launchre = re.compile(r"^launch\d+\.[vV][bB][sS]$")
    current = None
    for line in infile:
        line = line.rstrip()
        if line in script_dict:
            current = line
        elif launchre.match(line) is not None and current is not None:
            if line not in script_dict[current]:
                script_dict[current][line] = 1
            else:
                script_dict[current][line] += 1
print(script_dict)
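With the sample input from the question, this prints something like:
{'script7': {}, 'script2': {'launch4.VBS': 1}, 'script3': {}, 'script8': {'launch3.VBS': 1}, 'script5': {'launch1.VBS': 1}, 'script6': {}}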

You could use the setdefault method.
code:
dic = {}
with open("a.txt") as inp:
    check = 0
    key_string = ""
    for line in inp:
        if check:
            if line.strip().startswith("launch") and int(line.strip()[6]) < 6:
                print "yes"
                dic[key_string] = dic.setdefault(key_string, 0) + 1
            check = 0
        if line.strip().startswith("script"):
            key_string = line.strip()
            check = 1
For your example input (the six script3/launch pairs), the output would be
output:
{'script3': 6}

counting the unique words in a text file

Some of the unique words in the text file are not counted, and I have no idea what's wrong with my code.
file = open('tweets2.txt', 'r')
unique_count = 0
lines = file.readlines()
line = lines[3]
per_word = line.split()
for i in per_word:
    if line.count(i) == 1:
        unique_count = unique_count + 1
print(unique_count)
file.close()
Here is the text file:
"I love REDACTED and Fiesta and all but can REDACTED host more academic-related events besides strand days???"
The output of this code is:
16
The expected output for this line of the text file should be:
17
"i will crack a raw egg on my head if REDACTED move the resumption of classes to Jan 7. im not even kidding."
The output of this code is:
20
The expected output for this line of the text file should be:
23
If you want to count the number of unique whitespace delimited tokens (case-sensitive) in the entire file then:
with open('myfile.txt') as infile:
    print(len(set(infile.read().split())))
count() is matching substrings, not whole words. Instead, use the Pythonic set() function to clear out the duplicated words:
per_word = set(line.split())
print(len(per_word))
You are counting each word as a substring in the whole line because you do:
for i in per_word:
    if line.count(i) == 1:
So now some words are repeated as substrings, but not as words. For example, the first word is "i". line.count("i") gives 7 (it is also in "if", "im", etc.) so you don't count it as a unique word (even though it is). If you do:
for i in per_word:
    if per_word.count(i) == 1:
then you will count each word as a whole word and get the output you need.
Anyway this is very inefficient (O(n^2)) as you iterate over each word and then count iterates over the whole list again to count it. Either use a set as suggested in other answers or use a Counter:
from collections import Counter

unique_count = 0
line = "i will crack a raw egg on my head if REDACTED move the resumption of classes to Jan 7. im not even kidding."
per_word = line.split()
counter = Counter(per_word)
for count in counter.values():
    if count == 1:
        unique_count += 1
# Or simply:
unique_count = sum(count == 1 for count in counter.values())
print(unique_count)

CS50 PSET6 DNA no match using regex to count STR

I have been stuck at this point for quite a while, hope to get some tips.
The problem can be simplified as finding the largest number of consecutive occurrences of a pattern in a string. For a pattern AATG and a string like ATAATGAATGAATGGAATG, the right result should be 3. I tried to count the occurrences of the pattern by using re.compile(). I found out from the docs that if I want to find consecutive occurrences of a pattern I probably have to use the special character +. For instance, for a pattern like AATG I have to use re.compile(r'(AATG)+') instead of re.compile(r'AATG'); otherwise, the occurrences will be overcounted. However, in this program the pattern is not a fixed string, so I have to treat it as a variable. I have tried many ways to put it into re.compile() without positive results. Could anyone enlighten me on the correct way to format it (in the function countSTR below)?
After that, I think finditer(the_string_to_be_analyzed) should return an iterator over all matches found. Then I used match.end() - match.start() to obtain the length of every match, comparing them with each other to get the longest consecutive occurrence of the pattern. Maybe something goes wrong there?
Code attached. Every input would be appreciated!
from sys import argv, exit
import csv
import re

def main():
    if len(argv) != 3:
        print("Usage: python dna.py data.csv sequence.txt")
        exit(1)
    # read DNA sequence
    with open(argv[2], "r") as file:
        if file.mode != 'r':
            print(f"database {argv[2]} can not be read")
            exit(1)
        sequence = file.read()
    # read database.csv
    with open(argv[1], newline='') as file:
        if file.mode != 'r':
            print(f"database {argv[1]} can not be read")
            exit(1)
        # get the heading of the csv file in order to obtain STRs
        csv_reader = csv.reader(file)
        headings = next(csv_reader)
        # dictionary to store STRs match result of DNA-sequence
        STR_counter = {}
        for STR in headings[1::]:
            # entry result according to the STR keys
            STR_counter[STR] = countSTR(STR, sequence)
    # read csv file as a dictionary
    with open(argv[1], newline='') as file:
        database = csv.DictReader(file)
        for row in database:
            count = 0
            for STR in STR_counter:
                # print("row in database ", row[STR], "STR in STR_counter", STR_counter[STR])
                if int(row[STR]) == int(STR_counter[STR]):
                    count += 1
            if count == len(STR_counter):
                print(row['name'])
                exit(0)
        else:
            print("No match")

# find non-overlapping occurrences of STR in DNA-sequence
def countSTR(STR, sequence):
    count = 0
    maxcount = 0
    # in order to match a repeated STR, for example "('AATG')+", as the pattern
    # for re.compile(), rewrite STR to "(STR)+"
    STR = "(" + STR + ")+"
    pattern = re.compile(r'STR')
    # matches should be an iterator object
    matches = pattern.finditer(sequence)
    # go through every repeat and find the longest one
    # by match.end() - match.start()
    for match in matches:
        count = match.end() - match.start()
        if count > maxcount:
            maxcount = count
    # return repeat times of the longest repeat
    return maxcount/len(STR)

main()
I just found out a correct way to get the desired result; posting it here in case others are also confused.
From what I have understood, to match a variable named var_pattern you can use re.compile(rf'{var_pattern}'). Then, if consecutive occurrences of var_pattern should be searched, you can use re.compile(rf'({var_pattern})+'). There may be other smarter ways to implement this, but I managed to get it working fine.
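For completeness, a sketch of countSTR with the pattern interpolated this way (a non-capturing group works just as well, and note the division by the original STR's length, not the decorated pattern's):
import re

def countSTR(STR, sequence):
    # (?:...)+ matches one or more consecutive repeats of the STR unit
    pattern = re.compile(rf'(?:{STR})+')
    maxcount = 0
    for match in pattern.finditer(sequence):
        # length of this run of consecutive repeats
        count = match.end() - match.start()
        if count > maxcount:
            maxcount = count
    return maxcount // len(STR)

print(countSTR('AATG', 'ATAATGAATGAATGGAATG'))  # prints 3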

Python to sort and count the uniq names from the file

I'm trying to read the log file /var/log/messages on Linux for lines matching the pattern given below. From each such line I'm taking the e-mail address of the user, like rajeshm#noi-rajeshm.fox.com, and using the str.partition() method I'm splitting it into two parts, taking the first part and splitting that into a list so I can easily take the last index value, which is the user ID; that's working fine.
That said, I'm able to get the list of users and the total count, but I also need to count the occurrences of each user and print user_name: count, i.e. key and value.
Nov 28 09:00:08 foxopt210 rshd[6157]: pam_rhosts(rsh:auth): allowed
access to rajeshm#noi-rajeshm.fox.com as rajeshm
#!/usr/bin/python3
f = open("/var/log/messages")
count = 0
for line in f:
    if "allowed access" in line:
        count += 1
        user_id = line.partition('#')[0]
        user_id = user_id.split()[-1]
        print(user_id)
f.close()
print("--------------------")
print("Total Count :", count)
The current code is working as below:
bash-4.1$ ./log.py | tail
navit
akaul
akaul
pankaja
vishalm
vishalm
rajeshm
rajeshm
--------------------
Total Count : 790
While googling around I got the idea of using a dictionary for this purpose, and it's working as expected:
#!/usr/bin/python3
from collections import Counter
f = open("/var/log/messages")
count = 0
dictionary = {}
for line in f:
    if "allowed access" in line:
        user_id = line.partition('#')[0]
        user_count = user_id.split()[-1]
        if user_count in dictionary:
            dictionary[user_count] += 1
        else:
            dictionary[user_count] = 1
for user_count, occurences in dictionary.items():
    print(user_count, ':', occurences)
And my output is as desired:
bash-4.1$ ./log2.py
rajeshm : 5
navit : 780
akaul : 2
pankaja : 1
vishalm : 2
I'm just wondering if there is a better way to go about this exercise.
When counting things, it's easier to use the collections.Counter() class. I'd encapsulate parsing the lines into a generator here:
def users_accessed(fileobj):
    for line in fileobj:
        if 'allowed access' in line:
            yield line.partition('#')[0].rsplit(None, 1)[-1]
and pass this to the Counter() object:
from collections import Counter

with open("/var/log/messages") as f:
    access_counts = Counter(users_accessed(f))
    for userid, count in access_counts.most_common():
        print(userid, count, sep=':')
This uses the Counter.most_common() method to provide sorted output (most common to least).
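For illustration, most_common() returns (item, count) pairs sorted from the highest count down:
from collections import Counter

counts = Counter(['navit', 'navit', 'navit', 'akaul'])
print(counts.most_common())  # [('navit', 3), ('akaul', 1)]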
You can try it with a regular expression, like this:
import re

pattern = r'(?<=as\s)\w.+'
occurrence = {}
with open("/var/log/messages") as f:
    for line in f:
        search = re.search(pattern, line)
        # skip lines that don't match at all; otherwise .group()
        # would raise AttributeError on them
        if search is None:
            continue
        user = search.group()
        if user not in occurrence:
            occurrence[user] = 1
        else:
            occurrence[user] = occurrence.get(user) + 1
print(occurrence)
Just for fun, one-line logic:
import re

pattern = r'(?<=as\s)\w.+'
new = {}
[new.__setitem__(re.search(pattern, line).group(), 1) if re.search(pattern, line).group() not in new else new.__setitem__(re.search(pattern, line).group(), new.get(re.search(pattern, line).group()) + 1) for line in open('legend.txt', 'r')]
print(new)

Python equivalent for 'grep -C N'?

So right now I'm looking for something in a file. I get a value variable, which is a rather long string, with newlines and so on. Then I use re.findall(regex, value) to find that regex. The regex is rather simple - something like "abc de.*".
Now, I want to capture not only whatever the regex matches, but also its context (exactly like the -C flag for grep).
So, assuming that I dumped value to a file and ran grep on it, what I'd do is grep -C N 'abc de .*' valueinfile
How can I achieve the same thing in Python? I need the answer to work with Unicode regex/text.
My approach is to split the text block into a list of lines, then iterate through each line and see if there is a match. In case of a match, gather the context lines (the lines that come before and after the current line) and yield them. Here is my code:
import re

def grep(pattern, block, context_lines=0):
    lines = block.splitlines()
    for line_number, line in enumerate(lines):
        if re.match(pattern, line):
            # clamp at 0 so a match near the top doesn't produce a negative slice index
            start = max(line_number - context_lines, 0)
            lines_with_context = lines[start:line_number + context_lines + 1]
            yield '\n'.join(lines_with_context)

# Try it out
text_block = """One
Two
Three
abc defg
four
five
six
abc defoobar
seven
eight
abc de"""

pattern = 'abc de.*'
for line in grep(pattern, text_block, context_lines=2):
    print line
    print '---'
Output:
Two
Three
abc defg
four
five
---
five
six
abc defoobar
seven
eight
---
seven
eight
abc de
---
As recommended by Ignacio Vazquez-Abrams, use a deque to store the last n lines. Once that many lines are present, popleft for each new line added. When your regular expression finds a match, return the previous n lines in the stack then iterate n more lines and return those also.
This keeps you from having to iterate on any line twice (DRY) and stores only minimal data in memory. You also mentioned the need for Unicode, so handling file encoding and adding the Unicode flag to RegEx searches is important. Also, the other answer uses re.match() instead of re.search() and as such may have unintended consequences.
Below is an example. This example only iterates over every line ONCE in the file, which means context lines that also contain hits don't get looked at again. This may or may not be desirable behavior but can easily be tweaked to highlight or otherwise flag lines with additional hits within context for a previous hit.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import re
from collections import deque

def grep(pattern, input_file, context=0, case_sensitivity=True, file_encoding='utf-8'):
    stack = deque()
    hits = []
    lines_remaining = None
    with codecs.open(input_file, mode='rb', encoding=file_encoding) as f:
        for line in f:
            # if a previous hit's trailing context is complete, flush it
            # before starting a fresh stack
            if lines_remaining == 0:
                hits.append(stack)
                lines_remaining = None
                stack = deque()
            # append next line to stack
            stack.append(line)
            # keep adding context after a hit is found (without popping off
            # previous lines of context)
            if lines_remaining is not None:
                lines_remaining -= 1
                continue  # go to next line in file
            # if stack exceeds needed context, pop leftmost line off stack
            # (but include current line with possible search hit if applicable)
            if len(stack) > context + 1:
                stack.popleft()
            # search line for pattern
            if case_sensitivity:
                search_object = re.search(pattern, line, re.UNICODE)
            else:
                search_object = re.search(pattern, line, re.IGNORECASE | re.UNICODE)
            if search_object:
                lines_remaining = context
    # in case there are not enough lines left in the file to provide trailing context
    if lines_remaining is not None and len(stack) > 0:
        hits.append(stack)
    # return list of deques containing hits with context
    return hits  # you'll probably want to format the output, this is just an example
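Usage might look like this (assuming a UTF-8 file named 'valueinfile'; the formatting is just one possibility):
hits = grep(u'abc de.*', 'valueinfile', context=2, case_sensitivity=False)
for hit in hits:
    # each hit is a deque of lines (newlines included) around a match
    print(''.join(hit).rstrip())
    print('---')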

Help parsing text file in python

Really been struggling with this one for some time now. I have many text files with a specific format, from which I need to extract all the data and file it into different fields of a database. The struggle is tweaking the parameters for parsing, ensuring I get all the info correctly.
the format is shown below:
WHITESPACE HERE of unknown length.
K PA DETAILS
2 4565434 i need this sentace as one DB record
2 4456788 and this one
5 4879870 as well as this one, content will vary!
X Max - there sometimes is a line beginning with 'Max' here which i don't need
There is a Line here that i do not need!
WHITESPACE HERE of unknown length.
The tough parts were 1) getting rid of whitespace, and 2) separating the fields from each other; see my best attempt below:
dict = {}
XX = (open("XX.txt", "r")).readlines()
for line in XX:
    if line.isspace():
        pass
    elif line.startswith('There is'):
        pass
    elif line.startswith('Max', 2):
        pass
    elif line.startswith('K'):
        pass
    else:
        for word in line.split():
            if word.startswith('4'):
                tmp_PA = word
            elif word == "1" or word == "2" or word == "3" or word == "4" or word == "5":
                tmp_K = word
            else:
                tmp_DETAILS = word
        cu.execute('''INSERT INTO bugInfo2 (pa, k, details) VALUES(?,?,?)''', (tmp_PA, tmp_K, tmp_DETAILS))
At the minute, I can pull the K & PA fields no problem using this; however, my DETAILS is only pulling one word, and I need the entire sentence, or at least 25 chars of it.
Thanks very much for reading and I hope you can help! :)
K
You are splitting the whole line into words. You need to split into first word, second word and the rest. Like line.split(None, 2).
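For illustration, a maxsplit of 2 keeps the rest of the line intact as the third element (using a sample line from the question):
line = "2 4456788 and this one"
parts = line.split(None, 2)
# parts == ['2', '4456788', 'and this one']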
I would probably use regular expressions, and the opposite logic: if the line starts with a number 1 through 5, use it; otherwise skip it. Like:
pattern = re.compile(r'([12345])\s+(\d+)\s+(.*\S)')
f = open('XX.txt', 'r')  # no calling readlines; lazy iteration is better
for line in f:
    m = pattern.match(line)
    if m:
        cu.execute('''INSERT INTO bugInfo2 (pa, k, details) VALUES(?,?,?)''',
                   (m.group(2), m.group(1), m.group(3)))
Oh, and of course, you should be using prepared statements. Parsing SQL is orders of magnitude slower than executing it.
If I understand your file format correctly, you can try this script:
filename = 'bug.txt'
f = file(filename, 'r')
foundHeaders = False
records = []
for rawline in f:
    line = rawline.strip()
    if not foundHeaders:
        tokens = line.split()
        if tokens == ['K', 'PA', 'DETAILS']:
            foundHeaders = True
        continue
    else:
        tokens = line.split(None, 2)
        if len(tokens) != 3:
            break
        try:
            K = int(tokens[0])
            PA = int(tokens[1])
        except ValueError:
            break
        records.append((K, PA, tokens[2]))
f.close()
for r in records:
    print r  # replace this by your DB insertion code
This will start reading the records when it encounters the header line, and stop as soon as the format of the line is no longer (K,PA,description).
Hope this helps.
Here is my attempt using re
import re

stuff = open("source", "r").readlines()
whitey = re.compile(r"^[\s]+$")
header = re.compile(r"K PA DETAILS")
juicy_info = re.compile(r"^(?P<first>[\d])\s(?P<second>[\d]+)\s(?P<third>.+)$")
for line in stuff:
    if whitey.match(line):
        pass
    elif header.match(line):
        pass
    elif juicy_info.match(line):
        result = juicy_info.search(line)
        print result.group('third')
        print result.group('second')
        print result.group('first')
Using re I can pull the data out and manipulate it on a whim. If you only need the juicy info lines, you can actually take out all the other checks, making this a REALLY concise script.
import re

stuff = open("source", "r").readlines()
# create a regular expression using subpatterns.
# 'first', 'second' and 'third' are our own tags;
# we could call them Adam, Betty, etc.
juicy_info = re.compile(r"^(?P<first>[\d])\s(?P<second>[\d]+)\s(?P<third>.+)$")
for line in stuff:
    result = juicy_info.search(line)
    if result:  # do stuff with the data here, just use the tags we declared earlier
        print result.group('third')
        print result.group('second')
        print result.group('first')
import re

reg = re.compile('K[ \t]+PA[ \t]+DETAILS[ \t]*\r?\n'
                 + 3*'([1-5])[ \t]+(\d+)[ \t]*([^\r\n]+?)[ \t]*\r?\n')
with open('XX.txt') as f:
    mat = reg.search(f.read())
for tripl in ((2, 1, 3), (5, 4, 6), (8, 7, 9)):
    cu.execute('''INSERT INTO bugInfo2 (pa, k, details) VALUES(?,?,?)''',
               mat.group(*tripl))
I prefer to use [ \t] instead of \s because \s matches the following characters:
blank, '\f', '\n', '\r', '\t', '\v'
and I don't see any reason to use a symbol representing more than what is to be matched, with the risk of matching erratic newlines at places where they shouldn't be.
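A quick illustration of the difference:
import re

s = 'a b\tc\nd'
print(re.findall(r'\s', s))     # [' ', '\t', '\n'] - \s also matches the newline
print(re.findall(r'[ \t]', s))  # [' ', '\t'] - [ \t] leaves the newline alone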
Edit
It may be sufficient to do:
import re

reg = re.compile(r'^([1-5])[ \t]+(\d+)[ \t]*([^\r\n]+?)[ \t]*$', re.MULTILINE)
with open('XX.txt') as f:
    for mat in reg.finditer(f.read()):
        cu.execute('''INSERT INTO bugInfo2 (pa, k, details) VALUES(?,?,?)''',
                   mat.group(2, 1, 3))
