RegEx: replace text except if it is between quotes - python

I am working on a transpiler and want to replace my language's tokens with those of Python. The substitution is done like so:
for rep in reps:
    pattern, translated = rep
    # Replaces every [pattern] with [translated] in [transpiled]
    transpiled = re.sub(pattern, translated, transpiled, flags=re.UNICODE)
Where reps is a list of (regex to be replaced, string to replace it with) ordered pairs and transpiled is the text to be transpiled.
However, I can't seem to find a way to exclude text between quotes from the substitution process. Please note that this is for a language, so it should work for escaped quotes and single quotes as well.

This may depend on how you define your patterns, but in general you can always surround your pattern with a lookahead and a lookbehind group to ensure that text between quotes is not matched:
import re

transpiled = "A foo with \"foo\" and single quoted 'foo'. It even has an escaped \\'foo\\'!"
reps = [("foo", "bar"), ("and", "or")]

print(transpiled)  # before the changes
for rep in reps:
    pattern, translated = rep
    transpiled = re.sub("(?<=[^\"']){}(?=\\\\?[^\"'])".format(pattern),
                        translated, transpiled, flags=re.UNICODE)
    print(transpiled)  # after each change
Which will yield:
A foo with "foo" and single quoted 'foo'. It even has an escaped \'foo\'!
A bar with "foo" and single quoted 'foo'. It even has an escaped \'foo\'!
A bar with "foo" or single quoted 'foo'. It even has an escaped \'foo\'!
UPDATE: If you want to ignore whole quoted swaths of text, not just a quoted word, you'll have to do a bit more work. While you could do it by looking for repeated quotations, the whole lookahead/lookbehind mechanism would get really messy and probably far from optimal - it's just easier to separate the quoted from the non-quoted text and do replacements only in the latter, something like:
import re

QUOTED_STRING = re.compile("(\\\\?[\"']).*?\\1")  # a pattern to match strings between quotes

def replace_multiple(source, replacements, flags=0):  # a convenience replacement function
    if not source:  # no need to process empty strings
        return ""
    for r in replacements:
        source = re.sub(r[0], r[1], source, flags=flags)
    return source

def replace_non_quoted(source, replacements, flags=0):
    result = []  # a store for the result pieces
    head = 0  # a search head reference
    for match in QUOTED_STRING.finditer(source):
        # process everything until the current quoted match and add it to the result
        result.append(replace_multiple(source[head:match.start()], replacements, flags))
        result.append(match[0])  # add the quoted match verbatim to the result
        head = match.end()  # move the search head to the end of the quoted match
    if head < len(source):  # if the search head is not at the end of the string
        # process the rest of the string and add it to the result
        result.append(replace_multiple(source[head:], replacements, flags))
    return "".join(result)  # join back the result pieces and return them
You can test it as:
print(replace_non_quoted("A foo with \"foo\" and 'foo', says: 'I have a foo'!", reps))
# A bar with "foo" or 'foo', says: 'I have a foo'!
print(replace_non_quoted("A foo with \"foo\" and foo, says: \\'I have a foo\\'!", reps))
# A bar with "foo" or bar, says: \'I have a foo\'!
print(replace_non_quoted("A foo with '\"foo\" and foo', says - I have a foo!", reps))
# A bar with '"foo" and foo', says - I have a bar!
As an added bonus, this also allows you to define fully qualified regex patterns as your replacements:
print(replace_non_quoted("My foo and \"bar\" are like 'moo' and star!",
(("(\w+)oo", "oo\\1"), ("(\w+)ar", "ra\\1"))))
# My oof and "bar" are like 'moo' and rast!
But if your replacements do not involve patterns and need just a simple substitution you can replace the re.sub() in the replace_multiple() helper function with the significantly faster native str.replace().
Finally, you can get rid of regex completely if you don't need complex patterns:
QUOTE_STRINGS = ("'", "\\'", '"', '\\"')  # a list of substrings considered a 'quote'

def replace_multiple(source, replacements):  # a convenience multi-replacement function
    if not source:  # no need to process empty strings
        return ""
    for r in replacements:
        source = source.replace(r[0], r[1])
    return source

def replace_non_quoted(source, replacements):
    result = []  # a store for the result pieces
    head = 0  # a search head reference
    eos = len(source)  # a convenience string length reference
    quote = None  # last quote match literal
    quote_len = 0  # a convenience reference to the current quote substring length
    while True:
        if quote:  # we already have a matching quote stored
            index = source.find(quote, head + quote_len)  # find the closing quote
            if index == -1:  # EOS reached
                break
            result.append(source[head:index + quote_len])  # add the quoted string verbatim
            head = index + quote_len  # move the search head after the quoted match
            quote = None  # blank out the quote literal
        else:  # the current position is not in a quoted substring
            index = eos
            # find the first quoted substring from the current head position
            for entry in QUOTE_STRINGS:  # loop through all quote substrings
                candidate = source.find(entry, head)
                if head <= candidate < index:  # <= so a quote right at the head is not missed
                    index = candidate
                    quote = entry
                    quote_len = len(entry)
            if not quote:  # EOS reached, no quote found
                break
            result.append(replace_multiple(source[head:index], replacements))
            head = index  # move the search head to the start of the quoted match
    if head < eos:  # if the search head is not at the end of the string
        result.append(replace_multiple(source[head:], replacements))
    return "".join(result)  # join back the result pieces and return them

Rather than just using regexes, you probably want to use Python's built-in shlex module. It's designed for handling quoted strings like you find in a shell, including nested examples.
import shlex
shlex.split("""look "nested \\"quotes\\"" here""")
# ['look', 'nested "quotes"', 'here']
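Building on that idea, here is a rough sketch (mine, not from the answer above; the sample string and reps are illustrative) of how shlex could drive the replacement: with posix=False the quote characters stay on the quoted tokens, so you can substitute only in the unquoted ones. Note that rejoining with a single space discards the original spacing, and escape sequences are not interpreted in non-POSIX mode.
import shlex

reps = [("foo", "bar"), ("and", "or")]
source = 'a foo and "a quoted foo"'

tokens = shlex.split(source, posix=False)  # quoted tokens keep their quote characters
out = []
for tok in tokens:
    if tok[:1] in ('"', "'"):  # quoted token: leave it untouched
        out.append(tok)
    else:  # unquoted token: apply the replacements
        for old, new in reps:
            tok = tok.replace(old, new)
        out.append(tok)

print(" ".join(out))  # a bar or "a quoted foo"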

How can I implement isalnum() into this Python web scraper to remove special characters? [duplicate]

I'm trying to remove specific characters from a string using Python. This is the code I'm using right now. Unfortunately it appears to do nothing to the string.
for char in line:
    if char in " ?.!/;:":
        line.replace(char,'')
How do I do this properly?
Strings in Python are immutable (can't be changed). Because of this, the effect of line.replace(...) is just to create a new string, rather than changing the old one. You need to rebind (assign) it to line in order to have that variable take the new value, with those characters removed.
Also, the way you are doing it is going to be kind of slow, relatively. It's also likely to be a bit confusing to experienced pythonators, who will see a doubly-nested structure and think for a moment that something more complicated is going on.
In Python 2.6 and newer Python 2.x versions*, you can instead use str.translate (see the Python 3 answer below):
line = line.translate(None, '!@#$')
or regular expression replacement with re.sub
import re
line = re.sub('[!@#$]', '', line)
The characters enclosed in brackets constitute a character class. Any characters in line which are in that class are replaced with the second parameter to sub: an empty string.
Python 3 answer
In Python 3, strings are Unicode. You'll have to translate a little differently. kevpie mentions this in a comment on one of the answers, and it's noted in the documentation for str.translate.
When calling the translate method of a Unicode string, you cannot pass the second parameter that we used above. You also can't pass None as the first parameter. Instead, you pass a translation table (usually a dictionary) as the only parameter. This table maps the ordinal values of characters (i.e. the result of calling ord on them) to the ordinal values of the characters which should replace them, or—usefully to us—None to indicate that they should be deleted.
So to do the above dance with a Unicode string you would call something like
translation_table = dict.fromkeys(map(ord, '!@#$'), None)
unicode_line = unicode_line.translate(translation_table)
Here dict.fromkeys and map are used to succinctly generate a dictionary containing
{ord('!'): None, ord('@'): None, ...}
Even simpler, as another answer puts it, create the translation table in place:
unicode_line = unicode_line.translate({ord(c): None for c in '!@#$'})
Or, as brought up by Joseph Lee, create the same translation table with str.maketrans:
unicode_line = unicode_line.translate(str.maketrans('', '', '!@#$'))
* for compatibility with earlier Pythons, you can create a "null" translation table to pass in place of None:
import string
line = line.translate(string.maketrans('', ''), '!@#$')
Here string.maketrans is used to create a translation table, which is just a string containing the characters with ordinal values 0 to 255.
Am I missing the point here, or is it just the following:
string = "ab1cd1ef"
string = string.replace("1", "")
print(string)
# result: "abcdef"
Put it in a loop:
a = "a!b#c#d$"
b = "!##$"
for char in b:
    a = a.replace(char, "")
print(a)
# result: "abcd"
>>> line = "abc##!?efg12;:?"
>>> ''.join( c for c in line if c not in '?:!/;' )
'abc##efg12'
With re.sub regular expression
Substitution using regular expressions with re.sub is another option:
import re
re.sub('\ |\?|\.|\!|\/|\;|\:', '', line)
Example
import re
line = 'Q: Do I write ;/.??? No!!!'
re.sub('\ |\?|\.|\!|\/|\;|\:', '', line)
'QDoIwriteNo'
Explanation
In regular expressions (regex), | is a logical OR, and \ escapes spaces and special characters that might otherwise be interpreted as regex operators. sub stands for substitution, in this case with the empty string ''.
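As a side note (not part of the original answer), the same set of characters can be written as a single character class, which avoids all the escaping and the alternation:
import re

line = 'Q: Do I write ;/.??? No!!!'
print(re.sub(r'[ ?.!/;:]', '', line))  # QDoIwriteNo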
The asker almost had it. Like most things in Python, the answer is simpler than you think.
>>> line = "H E?.LL!/;O:: "
>>> for char in ' ?.!/;:':
... line = line.replace(char,'')
...
>>> print line
HELLO
You don't have to do the nested if/for loop thing, but you DO need to check each character individually.
For the inverse requirement of only allowing certain characters in a string, you can use regular expressions with a set complement operator [^ABCabc]. For example, to remove everything except ascii letters, digits, and the hyphen:
>>> import string
>>> import re
>>>
>>> phrase = ' There were "nine" (9) chick-peas in my pocket!!! '
>>> allow = string.letters + string.digits + '-'
>>> re.sub('[^%s]' % allow, '', phrase)
'Therewerenine9chick-peasinmypocket'
From the python regular expression documentation:
Characters that are not within a range can be matched by complementing
the set. If the first character of the set is '^', all the characters
that are not in the set will be matched. For example, [^5] will match
any character except '5', and [^^] will match any character except
'^'. ^ has no special meaning if it’s not the first character in the
set.
line = line.translate(None, " ?.!/;:")
>>> s = 'a1b2c3'
>>> ''.join(c for c in s if c not in '123')
'abc'
Strings are immutable in Python. The replace method returns a new string after the replacement. Try:
for char in line:
    if char in " ?.!/;:":
        line = line.replace(char,'')
This is identical to your original code, with the addition of an assignment to line inside the loop.
Note that the string replace() method replaces all of the occurrences of the character in the string, so you can do better by using replace() for each character you want to remove, instead of looping over each character in your string.
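For illustration, looping over the characters to remove rather than over the string looks like this (the sample string is borrowed from the earlier answer):
line = "H E?.LL!/;O:: "
for char in " ?.!/;:":
    line = line.replace(char, '')
print(line)  # HELLO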
I was surprised that no one had yet recommended using the builtin filter function.
import operator
import string # only for the example you could use a custom string
s = "1212edjaq"
Say we want to filter out everything that isn't a number. Using the filter builtin method "...is equivalent to the generator expression (item for item in iterable if function(item))" [Python 3 Builtins: Filter]
sList = list(s)
intsList = list(string.digits)
obj = filter(lambda x: operator.contains(intsList, x), sList)
In Python 3 this returns
>> <filter object # hex>
To get a printed string,
nums = "".join(list(obj))
print(nums)
>> "1212"
I am not sure how filter ranks in terms of efficiency but it is a good thing to know how to use when doing list comprehensions and such.
UPDATE
Logically, since filter works, you could also use a list comprehension, and from what I have read it is supposed to be more efficient because lambdas are the wall street hedge fund managers of the programming function world. Another plus is that it is a one-liner that doesn't require any imports. For example, using the same string s defined above,
num = "".join([i for i in s if i.isdigit()])
That's it. The return will be a string of all the characters that are digits in the original string.
If you have a specific list of acceptable/unacceptable characters you need only adjust the 'if' part of the list comprehension.
target_chars = "".join([i for i in s if i in some_list])
or alternatively,
target_chars = "".join([i for i in s if i not in some_list])
Using filter, you'd just need one line
line = filter(lambda char: char not in " ?.!/;:", line)
This treats the string as an iterable and checks every character if the lambda returns True:
>>> help(filter)
Help on built-in function filter in module __builtin__:
filter(...)
filter(function or None, sequence) -> list, tuple, or string
Return those items of sequence for which function(item) is true. If
function is None, return the items that are true. If sequence is a tuple
or string, return the same type, else return a list.
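A caveat worth noting (my addition, not part of the original answer): the help text above is Python 2's filter. Under Python 3, filter returns an iterator, so the result has to be joined back into a string explicitly:
line = "H E?.LL!/;O:: "
line = ''.join(filter(lambda char: char not in " ?.!/;:", line))
print(line)  # HELLO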
Try this one:
def rm_char(original_str, need2rm):
    '''Remove characters in "need2rm" from "original_str"'''
    return original_str.translate(str.maketrans('', '', need2rm))
This method works well in Python 3
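For example (an illustrative call, not from the original answer):
print(rm_char("H E?.LL!/;O:: ", " ?.!/;:"))  # HELLO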
Here's some possible ways to achieve this task:
def attempt1(string):
    return "".join([v for v in string if v not in ("a", "e", "i", "o", "u")])

def attempt2(string):
    for v in ("a", "e", "i", "o", "u"):
        string = string.replace(v, "")
    return string

def attempt3(string):
    import re
    for v in ("a", "e", "i", "o", "u"):
        string = re.sub(v, "", string)
    return string

def attempt4(string):
    return string.replace("a", "").replace("e", "").replace("i", "").replace("o", "").replace("u", "")

for attempt in [attempt1, attempt2, attempt3, attempt4]:
    print(attempt("murcielago"))
PS: Instead of using " ?.!/;:", the examples use the vowels... and yeah, "murcielago" is the Spanish word for bat... a funny word, as it contains all the vowels :)
PS2: If you're interested in performance, you could measure these attempts with a simple bit of code like:
import timeit

K = 1000000
for i in range(1, 5):
    t = timeit.Timer(
        f"attempt{i}('murcielago')",
        setup=f"from __main__ import attempt{i}"
    ).repeat(1, K)
    print(f"attempt{i}", min(t))
In my box you'd get:
attempt1 2.2334518376057244
attempt2 1.8806643818474513
attempt3 7.214925774955572
attempt4 1.7271184513757465
So it seems attempt4 is the fastest one for this particular input.
Here's my Python 2/3 compatible version, since the translate API has changed:
def remove(str_, chars):
    """Removes each char in `chars` from `str_`.

    Args:
        str_: String to remove characters from
        chars: String of to-be removed characters

    Returns:
        A copy of str_ with `chars` removed

    Example:
        remove("What?!?: darn;", " ?.!:;") => 'Whatdarn'
    """
    try:
        # Python 2.x
        return str_.translate(None, chars)
    except TypeError:
        # Python 3.x
        table = {ord(char): None for char in chars}
        return str_.translate(table)
#!/usr/bin/python
import re

strs = "how^ much for{} the maple syrup? $20.99? That's[] ricidulous!!!"
print strs
nstr = re.sub(r'[?|$|.|!|a|b]', r' ', strs)  # I have picked these characters to remove, but any character can be added here
print nstr
nestr = re.sub(r'[^a-zA-Z0-9 ]', r'', nstr)  # for removing special characters
print nestr
You can also use a function in order to substitute different kind of regular expression or other pattern with the use of a list. With that, you can mixed regular expression, character class, and really basic text pattern. It's really useful when you need to substitute a lot of elements like HTML ones.
*NB: works with Python 3.x
import re  # Regular expression library

def string_cleanup(x, notwanted):
    for item in notwanted:
        x = re.sub(item, '', x)
    return x
line = "<title>My example: <strong>A text %very% $clean!!</strong></title>"
print("Uncleaned: ", line)
# Get rid of html elements
html_elements = ["<title>", "</title>", "<strong>", "</strong>"]
line = string_cleanup(line, html_elements)
print("1st clean: ", line)
# Get rid of special characters
special_chars = ["[!##$]", "%"]
line = string_cleanup(line, special_chars)
print("2nd clean: ", line)
The function string_cleanup takes your string x and your list notwanted as arguments. For each item in that list of elements or patterns, any match is substituted with an empty string.
The output:
Uncleaned: <title>My example: <strong>A text %very% $clean!!</strong></title>
1st clean: My example: A text %very% $clean!!
2nd clean: My example: A text very clean
The method I'd use probably wouldn't work as efficiently, but it is massively simple. I can remove multiple characters at different positions all at once, using slicing and formatting.
Here's an example:
words = "things"
removed = "%s%s" % (words[:3], words[-1:])
This will result in 'removed' holding the word 'this'.
Formatting can be very helpful for printing variables midway through a print string. It can insert any data type using a % followed by the variable's data type; all data types can use %s, and floats (aka decimals) and integers can use %d.
Slicing can be used for intricate control over strings. When I put words[:3], it selects the characters in the string from the beginning (the colon is before the number, meaning 'from the beginning to') up to, but not including, the character at index 3 - i.e. the first three characters. The reason index 3 corresponds to the 4th character is that Python starts counting at 0. Then, when I put words[-1:], it means from the last character to the end of the string (the colon is behind the number). Putting -1 makes Python count from the last character rather than the first; again, Python starts at 0. So, words[-1:] basically means 'from the last character to the end of the string'.
So, by cutting off the characters before the character I want to remove and the characters after and sandwiching them together, I can remove the unwanted character. Think of it like a sausage. In the middle it's dirty, so I want to get rid of it. I simply cut off the two ends I want then put them together without the unwanted part in the middle.
If I want to remove multiple consecutive characters, I simply shift the numbers around in the [] (slicing part). Or if I want to remove multiple characters from different positions, I can simply sandwich together multiple slices at once.
Examples:
words = "control"
removed = "%s%s" % (words[:2], words[-2:])
removed equals 'cool'.
words = "impacts"
removed = "%s%s%s" % (words[1], words[3:5], words[-1])
removed equals 'macs'.
In this case, [3:5] means character at position 3 through character at position 5 (excluding the character at the final position).
Remember, Python starts counting at 0, so you will need to as well.
In Python 3, for example, to remove all the digits from a file name:
import os
os.rename(file_name, file_name.translate({ord(c): None for c in '0123456789'}))
How about this:
def text_cleanup(text):
    new = ""
    for i in text:
        if i not in " ?.!/;:":
            new += i
    return new
Here is one without using the regular expression concept:
ipstring = "text with symbols!@#$^&*( ends here"
opstring = ''
for i in ipstring:
    if i.isalnum() or i == ' ':
        opstring += i
print opstring
Recursive split:
s = string; chars = characters to remove
def strip(s, chars):
    if len(s) <= 1:
        return "" if s in chars else s
    return strip(s[0:int(len(s)/2)], chars) + strip(s[int(len(s)/2):len(s)], chars)
example:
print(strip("Hello!", "lo"))  # He!
You could use the re module's regular expression replacement. Using the ^ expression allows you to pick exactly what you want from your string.
import re
text = "This is absurd!"
text = re.sub("[^a-zA-Z]","",text) # Keeps only Alphabets
print(text)
Output to this would be "Thisisabsurd". Only things specified after the ^ symbol will appear.
# for each file in a directory, strip the digits from the file name
import os
import re

file_list = os.listdir(r"D:\Dev\Python")
for file_name in file_list:
    os.rename(file_name, re.sub(r'\d+', '', file_name))
The approach below also works:
line = "a,b,c,d,e"
alpha = list(line)
while ',' in alpha:
    alpha.remove(',')
finalString = ''.join(alpha)
print(finalString)
# output: abcde
The string method replace does not modify the original string. It leaves the original alone and returns a modified copy.
What you want is something like: line = line.replace(char,'')
def replace_all(line):
    for char in line:
        if char in " ?.!/;:":
            line = line.replace(char, '')
    return line
However, creating a new string each and every time that a character is removed is very inefficient. I recommend the following instead:
def replace_all(line, baddies):
    """
    The following is documentation on how to use the function,
    without reference to the implementation details:

    For implementation notes, please see comments beginning with `#`
    in the source file.

    [*crickets chirp*]
    """
    is_bad = lambda ch, baddies=baddies: ch in baddies
    filter_baddies = lambda ch, *, is_bad=is_bad: "" if is_bad(ch) else ch
    mahp = replace_all.map(filter_baddies, line)
    return replace_all.join('', mahp)
    # -------------------------------------------------
    # WHY `baddies=baddies`?!?
    #     `is_bad=is_bad`
    # -------------------------------------------------
    # Default arguments to a lambda function are evaluated
    # at the same time as when a lambda function is
    # **defined**.
    #
    # Global variables of a lambda function
    # are evaluated when the lambda function is
    # **called**.
    #
    # The following prints "as yellow as snow"
    #
    #     fleece_color = "white"
    #     little_lamb = lambda end: "as " + fleece_color + end
    #
    #     # sometime later...
    #
    #     fleece_color = "yellow"
    #     print(little_lamb(" as snow"))
    # --------------------------------------------------

replace_all.map = map
replace_all.join = str.join
If you want your string to contain only characters allowed by their ASCII codes, you can use this piece of code:
for char in s:
    if ord(char) < ord('a') or ord(char) > ord('z'):
        s = s.replace(char, "")
It will remove every character outside a...z, including upper-case letters.

I have a problem with the task of reversing words and removing parentheses

Task
Write a program that will decode the secret message by reversing text
between square brackets. The message may contain nested brackets (that
is, brackets within brackets, such as One[owT[Three[ruoF]]]). In
this case, innermost brackets take precedence, similar to parentheses
in mathematical expressions, e.g. you could decode the aforementioned
example like this:
One[owT[Three[ruoF]]]
One[owT[ThreeFour]]
One[owTruoFeerhT]
OneThreeFourTwo
In order to make your own task slightly easier and less tricky, you
have already replaced all whitespaces in the original text with
underscores (“_”) while copying it from the paper version.
Input description
The first and only line of the standard input
consists of a non-empty string of up to 2 · 10^6 characters which may
be letters, digits, basic punctuation (“,.?!’-;:”), underscores (“_”)
and square brackets (“[]”). You can safely assume that all square
brackets are paired correctly, i.e. every opening bracket has exactly
one closing bracket matching it and vice versa.
Output description
The standard output should contain one line – the
decoded secret message without any square brackets.
Example
For sample input:
A[W_[y,[]]oh]o[dlr][!]
the correct output is:
Ahoy,_World!
Explanation
This example contains empty brackets. Of course, an empty string, when
reversed, remains empty, so we can simply ignore them. Then, as
previously, we can decode this example in stages, first reversing the
innermost brackets to obtain A[W_,yoh]o[dlr][!]. Afterwards, there
are no longer any nested brackets, so the remainder of the task is
trivial.
Below is my program that doesn't quite work
word = input("print something: ")
word_reverse = word[::-1]
while("[" in word and "]" in word):
    open_brackets_index = word.index("[")
    close_brackets_index = word_reverse.index("]")*(-1)-1
    # print(word)
    # print(open_brackets_index)
    # print(close_brackets_index)
    reverse_word_into_quotes = word[open_brackets_index+1:close_brackets_index:][::-1]
    word = word[:close_brackets_index]
    word = word[:open_brackets_index]
    word = word + reverse_word_into_quotes
    word = word.replace("[","]").replace("]","[")
    print(word)
print(word)
Unfortunately my code only works with one pair of parentheses and I don't know how to fix it.
Thank you in advance for your help
Assuming the re module can be used, this code does the job:
import re

text = 'A[W_[y,[]]oh]o[dlr][!]'

# This scary regular expression does all the work:
# It says find a sequence that starts with [ and ends with ] and
# contains anything BUT [ and ]
pattern = re.compile('\[([^\[\]]*)\]')

while True:
    m = re.search(pattern, text)
    if m:
        # Here a single pattern like [String], if any, is replaced with gnirtS
        text = re.sub(pattern, m[1][::-1], text, count=1)
    else:
        break

print(text)
Which prints this line:
Ahoy,_World!
I realize that my previous answer has been accepted but, for completeness, I'm submitting a second solution that does NOT use the re module:
text = 'A[W_[y,[]]oh]o[dlr][!]'

def find_pattern(text):
    # Find [...] and return the locations of [ (start), ] (end),
    # and the in-between str (content)
    content = ''
    for i, c in enumerate(text):
        if c == '[':
            content = ''
            start = i
        elif c == ']':
            end = i
            return start, end, content
        else:
            content += c
    return None, None, None

while True:
    start, end, content = find_pattern(text)
    if start is None:
        break
    # Replace the content between [] with its reverse
    text = "".join((text[:start], content[::-1], text[end+1:]))

print(text)

Working with pieces of text in python

Data.txt includes words that are upper and lower-cased.
I need to lower-case them all, except for the upper-cased characters that appear in braces. The braces come immediately after a word (which can end in either lower or upper case), with no space before the opening brace.
e.g.
CAT{TT} Dog{DD} Horse{AA}
Snail{LL} RAT{TT}
ANT{AA}
These should be transformed into:
cat{TT} dog{DD} horse{AA}
snail{LL} rat{TT}
ant{AA}
As a first step, I lower-cased everything in the list and placed the results in lcChar (code below). I was then trying to find the lower-cased characters within braces so that I could upper-case them again.
Being a Python newbie, I got stuck in my code below. It gives only the very first item in braces. Also, I am assuming I need another loop in order to upper-case all the items that appear in the braces. Any help, please, so I can understand the best methodology for handling these types of issues?
import re

f = open(r'C:\Python27\MyScripts\Data.txt')
for line in f:
    lcChar = line.lower()
    patFinder1 = re.compile('{[a-z]+}')
    findPat1 = re.findall(patFinder1, lcChar)
re.sub and re.subn allow the second parameter to be a function. A Match Object is passed into that function and whatever the function returns is used for the substitution.
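As a minimal standalone illustration of that (the sample string and the doubling rule are mine, purely illustrative), the function receives each match object and whatever it returns is spliced into the result:
import re

print(re.sub(r'\d+', lambda m: str(int(m.group(0)) * 2), "3 apples and 12 pears"))
# 6 apples and 24 pears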
This is my take on it:
import re

def manip(m):
    return m.groups()[0].lower()

data = ['CAT{TT} Dog{DD} Horse{AA}',
        'Snail{LL} RAT{TT}',
        'ANT{AA}']

for line in data:
    new_line = re.sub(r'((?:[^{]|^)[A-Z]+(?:[^}]|$))', manip, line)
    print new_line
Produces:
cat{TT} dog{DD} horse{AA}
snail{LL} rat{TT}
ant{AA}
I could have used a lambda instead, but that's arguably less clear.
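For reference, the lambda variant mentioned above would look like this (equivalent to passing manip; the single sample line is just for illustration):
import re

line = 'Snail{LL} RAT{TT}'
new_line = re.sub(r'((?:[^{]|^)[A-Z]+(?:[^}]|$))', lambda m: m.groups()[0].lower(), line)
print(new_line)  # snail{LL} rat{TT}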
A straight forward way of doing it:
import re

regex = re.compile('([^}]*?{)')

str_ = '''CAT{TT} Dog{DD} Horse{AA}
Snail{LL} RAT{TT}
ANT{AA}'''

new_str = re.sub(regex, lambda match: match.groups()[0].lower(), str_)

assert new_str == '''cat{TT} dog{DD} horse{AA}
snail{LL} rat{TT}
ant{AA}'''

print new_str
Explanation:
I use the regex to match only what needs to be lowercased.
re.sub then replaces each match with its lowercased version.
Edit: a more optimized version, using sub to do the replacement.

Split a string using a list of strings as a pattern

Consider an input string :
mystr = "just some stupid string to illustrate my question"
and a list of strings indicating where to split the input string:
splitters = ["some", "illustrate"]
The output should look like
result = ["just ", "some stupid string to ", "illustrate my question"]
I wrote some code which implements the following approach. For each of the strings in splitters, I find its occurrences in the input string, and insert something which I know for sure would not be a part of my input string (for example, this '!!'). Then I split the string using the substring that I just inserted.
for s in splitters:
    mystr = re.sub(r'(%s)' % s, r'!!\1', mystr)
result = re.split('!!', mystr)
This solution seems ugly, is there a nicer way of doing it?
Splitting with re.split will always remove the matched string from the output (NB, this is not quite true, see the edit below). Therefore, you must use positive lookahead expressions ((?=...)) to match without removing the match. However, re.split ignores empty matches, so simply using a lookahead expression doesn't work. Instead, you will lose one character at each split at minimum (even trying to trick re with "boundary" matches (\b) does not work). If you don't care about losing one whitespace / non-word character at the end of each item (assuming you only split at non-word characters), you can use something like
re.split(r"\W(?=some|illustrate)")
which would give
["just", "some stupid string to", "illustrate my question"]
(note that the spaces after just and to are missing). You could then programmatically generate these regexes using str.join. Note that each of the split markers is escaped with re.escape so that special characters in the items of splitters do not affect the meaning of the regular expression in any undesired ways (imagine, e.g., a ) in one of the strings, which would otherwise lead to a regex syntax error).
the_regex = r"\W(?={})".format("|".join(re.escape(s) for s in splitters))
Edit (HT to @Arkadiy): Grouping the actual match, i.e. using (\W) instead of \W, returns the non-word characters inserted into the list as separate items. Joining every two subsequent items would then produce the list as desired as well. Then, you can also drop the requirement of having a non-word character by using (.) instead of \W:
the_new_regex = r"(.)(?={})".format("|".join(re.escape(s) for s in splitters))
the_split = re.split(the_new_regex, mystr)
the_actual_split = ["".join(x) for x in itertools.izip_longest(the_split[::2], the_split[1::2], fillvalue='')]
Because normal text and auxiliary characters alternate, the_split[::2] contains the normal split text and the_split[1::2] the auxiliary characters. Then, itertools.izip_longest is used to combine each text item with the corresponding removed character and the last item (which is unmatched in the removed characters) with fillvalue, i.e. ''. Then, each of these tuples is joined using "".join(x). Note that this requires itertools to be imported (you could of course do this in a simple loop, but itertools provides very clean solutions to these things). Also note that itertools.izip_longest is called itertools.zip_longest in Python 3.
This leads to further simplification of the regular expression, because instead of using auxiliary characters, the lookahead can be replaced with a simple matching group ((some|interesting) instead of (.)(?=some|interesting)):
the_newest_regex = "({})".format("|".join(re.escape(s) for s in splitters))
the_raw_split = re.split(the_newest_regex, mystr)
the_actual_split = ["".join(x) for x in itertools.izip_longest([""] + the_raw_split[1::2], the_raw_split[::2], fillvalue='')]
Here, the slice indices on the_raw_split have swapped, because now each separator must be attached to the item after it rather than the item in front of it. Also note the [""] + part, which is necessary to pair the first item with "" to fix the order.
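To make the pairing concrete, here are the intermediate values for the example input (this walkthrough is my addition; on Python 3, itertools.izip_longest is spelled itertools.zip_longest):
import itertools
import re

mystr = "just some stupid string to illustrate my question"
splitters = ["some", "illustrate"]

the_newest_regex = "({})".format("|".join(re.escape(s) for s in splitters))
the_raw_split = re.split(the_newest_regex, mystr)
# ['just ', 'some', ' stupid string to ', 'illustrate', ' my question']

pairs = itertools.izip_longest([""] + the_raw_split[1::2], the_raw_split[::2], fillvalue='')
# ('', 'just '), ('some', ' stupid string to '), ('illustrate', ' my question')
print(["".join(x) for x in pairs])
# ['just ', 'some stupid string to ', 'illustrate my question']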
(end of edit)
Alternatively, you can (if you want) use string.replace instead of re.sub for each splitter (I think that is a matter of preference in your case, but in general it is probably more efficient)
for s in splitters:
    mystr = mystr.replace(s, "!!" + s)
Also, if you use a fixed token to indicate where to split, you do not need re.split, but can use string.split instead:
result = mystr.split("!!")
What you could also do (instead of relying on the replacement token not to be in the string anywhere else or relying on every split position being preceded by a non-word character) is finding the split strings in the input using string.find and using string slicing to extract the pieces:
def split(string, splitters):
    while True:
        # Get the positions to split at for all splitters still in the string
        # that are not at the very front of the string
        split_positions = [i for i in (string.find(s) for s in splitters) if i > 0]
        if len(split_positions) > 0:
            # There is still somewhere to split
            next_split = min(split_positions)
            yield string[:next_split]  # Yield everything before that position
            string = string[next_split:]  # Retain the rest of the string
        else:
            yield string  # Yield the rest of the string
            break  # Done.
Here, [i for i in (string.find(s) for s in splitters) if i > 0] generates a list of positions where the splitters can be found, for all splitters that are in the string (for this, i < 0 is excluded) and not right at the beginning (where we (possibly) just split, so i == 0 is excluded as well). If there are any left in the string, we yield (this is a generator function) everything up to (excluding) the first splitter (at min(split_positions)) and replace the string with the remaining part. If there are none left, we yield the last part of the string and exit the function. Because this uses yield, it is a generator function, so you need to use list to turn it into an actual list.
Note that you could also replace yield whatever with a call to some_list.append (provided you defined some_list earlier) and return some_list at the very end, I do not consider that to be very good code style, though.
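For completeness, calling the generator on the example input from the question (this usage line is my addition) gives exactly the desired output:
mystr = "just some stupid string to illustrate my question"
splitters = ["some", "illustrate"]
print(list(split(mystr, splitters)))
# ['just ', 'some stupid string to ', 'illustrate my question']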
TL;DR
If you are OK with using regular expressions, use
the_newest_regex = "({})".format("|".join(re.escape(s) for s in splitters))
the_raw_split = re.split(the_newest_regex, mystr)
the_actual_split = ["".join(x) for x in itertools.izip_longest([""] + the_raw_split[1::2], the_raw_split[::2], fillvalue='')]
else, the same can also be achieved using string.find with the following split function:
def split(string, splitters):
    while True:
        # Get the positions to split at for all splitters still in the string
        # that are not at the very front of the string
        split_positions = [i for i in (string.find(s) for s in splitters) if i > 0]
        if len(split_positions) > 0:
            # There is still somewhere to split
            next_split = min(split_positions)
            yield string[:next_split]  # Yield everything before that position
            string = string[next_split:]  # Retain the rest of the string
        else:
            yield string  # Yield the rest of the string
            break  # Done.
Not especially elegant but avoiding regex:
mystr = "just some stupid string to illustrate my question"
splitters = ["some", "illustrate"]
indexes = [0] + [mystr.index(s) for s in splitters] + [len(mystr)]
indexes = sorted(list(set(indexes)))
print [mystr[i:j] for i, j in zip(indexes[:-1], indexes[1:])]
# ['just ', 'some stupid string to ', 'illustrate my question']
I should acknowledge here that a little more work is needed if a word in splitters occurs more than once because str.index finds only the location of the first occurrence of the word...
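A rough sketch of one way to handle repeated splitter words (this extension is mine, not part of the answer above): collect every occurrence with str.find in a loop instead of taking only the first index:
mystr = "just some stupid string, some more, to illustrate my question"
splitters = ["some", "illustrate"]

indexes = {0, len(mystr)}
for s in splitters:
    start = mystr.find(s)
    while start != -1:  # record every occurrence, not just the first
        indexes.add(start)
        start = mystr.find(s, start + 1)
indexes = sorted(indexes)

print([mystr[i:j] for i, j in zip(indexes[:-1], indexes[1:])])
# ['just ', 'some stupid string, ', 'some more, to ', 'illustrate my question']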

Python complex regex replace

I'm trying to do a simple VB6 to C translator to help me port an open source game to the C language.
I want to be able to get "NpcList[NpcIndex]" from "With Npclist[NpcIndex]" using regex and to replace it everywhere it has to be replaced. ("With" is used as a macro in VB6 that adds Npclist[NpcIndex] wherever it is needed until it finds "End With".)
Example:
With Npclist[NpcIndex]
    .goTo(245) <-- it should be replaced with Npclist[NpcIndex].goTo(245)
End With
Is it possible to use regex to do the job?
I've tried using a function to perform another regex replace between the "With" and the "End With" but I can't know the text the "With" is replacing (Npclist[NpcIndex]).
Thanks in advance
I personally wouldn't trust any single-regex solution to get it right on the first time nor feel like debugging it. Instead, I would parse the code line-to-line and cache any With expression to use it to replace any . directly preceded by whitespace or by any type of brackets (add use-cases as needed):
(?<=[\s[({])\. - positive lookbehind for any character from the set + escaped literal dot
(?:(?<=[\s[({])|^)\. - use this non-capturing alternatives list if to-be-replaced . can occur on the beginning of line
import re

def convert_vb_to_c(vb_code_lines):
    c_code = []
    current_with = ""
    for line in vb_code_lines:
        if re.search(r'^\s*With', line) is not None:
            current_with = line[5:] + "."
            continue
        elif re.search(r'^\s*End With', line) is not None:
            current_with = "{error_outside_with_replacement}"
            continue
        line = re.sub(r'(?<=[\s[({])\.', current_with, line)
        c_code.append(line)
    return "\n".join(c_code)

example = """
With Npclist[NpcIndex]
    .goTo(245)
End With

With hatla
    .matla.tatla[.matla.other] = .matla.other2
    dont.mind.me(.do.mind.me)
    .next()
End With
"""

# use file_object.readlines() in real life
print(convert_vb_to_c(example.split("\n")))
You can pass a function to the sub method:
# just to give the idea of the regex
regex = re.compile(r'''With (.+)
(the-regex-for-the-VB-expression)+?
End With''')

def repl(match):
    beginning = match.group(1)  # NpcList[NpcIndex] in your example
    return ''.join(beginning + line for line in match.group(2).splitlines())

re.sub(regex, repl, the_string)
In repl you can obtain all the information about the match from the match object, build whichever string you want, and return it. The matched string will be replaced by the string you return.
Note that you must be really careful writing the regex above. In particular, using (.+) as I did matches the whole line up to, but excluding, the newline, which may or may not be what you want (but I don't know VB, and I have no idea which regex should go there instead to catch only what you want).
The same goes for (the-regex-for-the-VB-expression)+?. I have no idea what code could be in those lines, hence I leave the details of implementing it to you. Maybe taking the whole line is okay, but I wouldn't trust something this simple (probably expressions can span multiple lines, right?).
Also, doing it all in one big regular expression is, in general, error prone and slow.
I'd strongly consider using regexes only to find With and End With and use something else to do the replacements.
This may do what you need in Python 2.7. I'm assuming you want to strip out the With and End With, right? You don't need those in C.
>>> import re
>>> search_text = """
... With Np1clist[Npc1Index]
... .comeFrom(543)
... End With
...
... With Npc2list[Npc2Index]
... .goTo(245)
... End With"""
>>>
>>> def f(m):
...     return '{0}{1}({2})'.format(m.group(1), m.group(2), m.group(3))
...
>>> regex = r'With\s+([^\s]*)\s*(\.[^(]+)\(([^)]+)\)[^\n]*\nEnd With'
>>> print re.sub(regex, f, search_text)
Np1clist[Npc1Index].comeFrom(543)
Npc2list[Npc2Index].goTo(245)
