Cyclic relations between nodes A>B>C>A - Python

This is how it looks before and after for the problem I am trying to solve using Python. I have been trying for weeks, and I am failing miserably to tell Python to do the following:
STEP1: If you find "LinkedTo=" in this document *(example value: Node_3)*
STEP2: Then stop
STEP3: Go to the previous NodePosX= and copy the value *(example value: 10)*
STEP4: Go to the previous NodePosY= and copy the value *(example value: 100)*
STEP5: Then find the next "Node_3" in the document
STEP6: And replace the values in NodePosX=30 and NodePosY=300 with the copied values 10 and 100
STEP7: Then look for the next "LinkedTo=" *(example value: Node_5)* and repeat STEP2 to STEP6
This is how it looks BEFORE running the Python script:
Begin
Name="Node_1"
NodePosX=10
NodePosY=100
LinkedTo=Node_3
LinkedTo=Node_5
End Object
Begin
Name="Node_2"
NodePosX=20
NodePosY=200
End Object
Begin
Name="Node_3"
NodePosX=30
NodePosY=300
End Object
Begin
Name="Node_4"
NodePosX=40
NodePosY=400
End Object
Begin
Name="Node_5"
NodePosX=50
NodePosY=500
End Object
This is how it should look AFTER running the Python script:
Begin
Name="Node_1"
NodePosX=10
NodePosY=100
LinkedTo=Node_3
LinkedTo=Node_5
End Object
Begin
Name="Node_2"
NodePosX=20
NodePosY=200
End Object
Begin
Name="Node_3"
NodePosX=10
NodePosY=100
End Object
Begin
Name="Node_4"
NodePosX=40
NodePosY=400
End Object
Begin
Name="Node_5"
NodePosX=10
NodePosY=100
End Object
Do you think I am asking too much of Python?
Any better suggestions for a title for this problem?

I hired a developer, and this is the code they wrote:
'''
By: Alex Reichenbach
'''
import re

begin_regex = re.compile("Begin")
# Capture the full node name (e.g. "Node_1") so it can be matched
# against the LinkedTo= values later on.
name_regex = "(?<=Name=\").*(?=\")"
posX_regex = "(?<=NodePosX=).*"
posY_regex = "(?<=NodePosY=).*"
linkedTo_regex = "(?<=LinkedTo=).*"
end_regex = re.compile("End Object")

## Reading the contents of the file
text = open("1-Example-Original.txt", "r").read()

class Node:
    def __init__(self):
        self.name = ""
        self.nodePosX = 0
        self.nodePosY = 0
        self.linked_to = []

    def __str__(self):
        linked = ""
        for l in self.linked_to:
            linked += "\nLinkedTo=" + l
        return """Begin
Name="%s"
NodePosX=%s
NodePosY=%s%s
End Object
""" % (self.name, self.nodePosX, self.nodePosY, linked)

## Read the text into the node objects
nodes = []
current_node = None
for line in text.split('\n'):              ## Iterate through each line
    if begin_regex.match(line):            ## Begin
        current_node = Node()
        nodes.append(current_node)
    elif re.findall(name_regex, line):     ## Name
        name = re.findall(name_regex, line)[0]
        current_node.name = name
    elif re.findall(posX_regex, line):     ## PosX
        posX = re.findall(posX_regex, line)[0]
        current_node.nodePosX = posX
    elif re.findall(posY_regex, line):     ## PosY
        posY = re.findall(posY_regex, line)[0]
        current_node.nodePosY = posY
    elif re.findall(linkedTo_regex, line): ## LinkedTo
        name = re.findall(linkedTo_regex, line)[0]
        current_node.linked_to.append(name)

## Copy the positions onto the linked nodes
for i in range(len(nodes)):
    for j in range(i, len(nodes)):
        node1 = nodes[i]
        node2 = nodes[j]
        if node2.name in node1.linked_to:
            node2.nodePosX = node1.nodePosX
            node2.nodePosY = node1.nodePosY

## Print it all out
s = ""
for node in nodes:
    s += str(node)
print(s)

## Write to file
open("_edited.txt", "w").write(s)

Related

Depth first search algorithm skipping spaces in maze?

After finishing the first lecture of Harvard's AI course on edX, I decided to implement the concepts taught, starting with the depth-first search algorithm.
The objective of this program is to read a maze from the text file mazefile and find a path from S to G using the depth-first search algorithm.
The project currently consists of four files: (1) the code with the class methods, (2) the text file which contains the maze, (3) another text file that receives the results (where the AI has explored), and (4) the main Python script. Here they are; feel free to copy and paste them into a folder and see how they run.
processText.py (file 1)
# Code to process the mazefile file.
class importMaze:
    def __init__(self, maze):
        self.fileLines = []
        self.fileName = maze
        self.switch = False
        self.toBeReturned = []

    def processThis(self):
        f = open(self.fileName, "r")
        for x in f:
            self.fileLines.append(x[:-1])
        f.close()
        for i in self.fileLines:
            if self.switch == True:
                if str(i) == "END":
                    self.switch = False
                else:
                    self.toBeReturned.append(i)
            else:
                if str(i) == "START":
                    self.switch = True
        return self.toBeReturned

class mazePointer:
    def __init__(self, mazearray):
        self.Sample = mazearray
        self.initialPosition = []
        for y in range(0, len(self.Sample)):
            for x in range(0, len(self.Sample[y])):
                if str(self.Sample[y][x]) == "S":
                    self.initialPosition = [x, y]
        self.currentPosition = self.initialPosition

    def whatIs(self, xcoordinate, ycoordinate):
        return (self.Sample[ycoordinate])[xcoordinate]

    def nearbyFreeSpaces(self, search):
        self.freeSpaces = []
        if self.whatIs(self.currentPosition[0]-1, self.currentPosition[1]) == search:
            self.freeSpaces.append([self.currentPosition[0]-1, self.currentPosition[1]])
        if self.whatIs(self.currentPosition[0]+1, self.currentPosition[1]) == search:
            self.freeSpaces.append([self.currentPosition[0]+1, self.currentPosition[1]])
        if self.whatIs(self.currentPosition[0], self.currentPosition[1]-1) == search:
            self.freeSpaces.append([self.currentPosition[0], self.currentPosition[1]-1])
        if self.whatIs(self.currentPosition[1], self.currentPosition[1]+1) == search:
            self.freeSpaces.append([self.currentPosition[1], self.currentPosition[1]+1])
        return self.freeSpaces

    def moveTo(self, position):
        self.currentPosition = position
TestingTrack.py (the main file)
from processText import importMaze, mazePointer

testObject = importMaze("mazefile")
environment = testObject.processThis()
finger = mazePointer(environment)
frontier = []
explored = []
result = ""

def Search():
    global result
    if len(finger.nearbyFreeSpaces("G")) == 1:  # If the goal is bordering this space
        result = finger.nearbyFreeSpaces("G")[0]
        explored.append(finger.currentPosition)
    else:
        newPlaces = finger.nearbyFreeSpaces("F")  # Finds the free spaces bordering
        for i in newPlaces:
            if i in explored:  # Skips the ones already visited
                pass
            else:
                frontier.append(i)

while result == "":
    explored.append(finger.currentPosition)
    Search()
    finger.moveTo(frontier[-1])
    frontier.pop(-1)

exploredArray = []
for y in range(len(environment)):  # Recreates the maze, fills in 'E' where the AI has visited.
    holder = ""
    for x in range(len(environment[y])):
        if [x, y] in explored:
            holder += "E"
        else:
            holder += str(environment[y][x])
    exploredArray.append(holder)

def createResult(mazeList, title, append):  # Creating the file
    file = open("resultfile", append)
    string = title + " \n F - Free \n O - Occupied \n S - Starting point \n G - Goal \n E - Explored/Visited \n (Abdulaziz Albastaki 2020) \n \n (top left coordinate - 0,0) \n "
    for i in exploredArray:
        string += "\n" + str(i)
    string += "\n \n Original problem \n"
    for i in environment:
        string += "\n" + str(i)
    file.write(string)
    file.close()

def tracingPath():
    initialExplored = explored
    proceed = True
    newExplored = []
    for i in explored:
        finger.moveTo()  # incomplete
    print(explored)

createResult(exploredArray, "DEPTH FIRST SEARCH", "w")
mazefile (the program will read this file to get the maze)
F - Free
O - Occupied
S - Starting point
G - Goal
(Abdulaziz Albastaki 2020)
START
OOOOOOOOOOOOOOOO
OFFFFFFFFFFFFFGO
OFOOOOOOOOOOOOFO
OFOOOOOOOOOOOOFO
OFOOOOOOOOOOOOFO
OFOOOOOOOOOOOOFO
OSFFFFFFFFFFFFFO
OOOOOOOOOOOOOOOO
END
Made by Abdulaziz Albastaki in October 2020
You can change the maze and its size; however, it must
-Respect the key above
-Have ONE Starting point and goal
-The maze must be in between 'START' and 'END'
-The maze MUST be surrounded by occupied space
SAMPLE PROBLEMS:
OOOOOOOOOOOOOOOO
OFFFFFFFFFFFFFGO
OFOOOOOOOOOOOOFO
OFOOOOOOOOOOOOFO
OFOOOOOOOOOOOOFO
OFOOOOOOOOOOOOFO
OSFFFFFFFFFFFFFO
OOOOOOOOOOOOOOOO
OOOOOOOOOOOOOOOOO
OFOFFFFFOOOFFFOOO
OFFFOOOFOOFFOOOFO
OFOOOOOFOOFOOOOFO
OSFGFFFFFFFFFFFFO
OOOOOOOOOOOOOOOOO
There is also a resultfile; just create an empty text file with that name (no extension) and the program will fill it in with results.
The problem is with the resultfile; here it is:
DEPTH FIRST SEARCH
F - Free
O - Occupied
S - Starting point
G - Goal
E - Explored/Visited
(Abdulaziz Albastaki 2020)
(top left coordinate - 0,0)
OOOOOOOOOOOOOOOO
OFFFFFFFFFFFFFGO
OFOOOOOOOOOOOOEO
OFOOOOOOOOOOOOEO
OFOOOOOOOOOOOOEO
OEOOOOOOOOOOOOEO
OEFFFEEEEEEEEEEO
OOOOOOOOOOOOOOOO
Original problem
OOOOOOOOOOOOOOOO
OFFFFFFFFFFFFFGO
OFOOOOOOOOOOOOFO
OFOOOOOOOOOOOOFO
OFOOOOOOOOOOOOFO
OFOOOOOOOOOOOOFO
OSFFFFFFFFFFFFFO
OOOOOOOOOOOOOOOO
The AI skipped a few spaces to get to the goal. Why is it doing so?
Feel free to ask me for any clarifications.
There are the following issues:
The last if block in nearbyFreeSpaces uses the wrong index:
if self.whatIs(self.currentPosition[1],self.currentPosition[1]+1) == search:
    self.freeSpaces.append([self.currentPosition[1],self.currentPosition[1]+1])
should be:
if self.whatIs(self.currentPosition[0],self.currentPosition[1]+1) == search:
    self.freeSpaces.append([self.currentPosition[0],self.currentPosition[1]+1])
The final position is not correctly added to the path. The last line of this block:
if len(finger.nearbyFreeSpaces("G")) == 1: #If the goal is bordering this space
    result = finger.nearbyFreeSpaces("G")[0]
    explored.append(finger.currentPosition)
...should be:
explored.append(result)
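As a side note, iterating over the four neighbour offsets avoids this kind of copy-and-paste index slip entirely. Here is a minimal sketch of such a rewrite, behaviourally equivalent to the corrected method (names taken from the code above):

def nearbyFreeSpaces(self, search):
    # Check the four neighbours (left, right, up, down) of the current position.
    x, y = self.currentPosition
    self.freeSpaces = []
    for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        if self.whatIs(x + dx, y + dy) == search:
            self.freeSpaces.append([x + dx, y + dy])
    return self.freeSpaces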

List index error

I have created a program which reads file lines into a list. When I run the code below, I can see that there is a list with elements inside it.
dogs_list_player = []
dogs_list_computer = []
with open("dogs.txt") as f:
    for i in range(Y):
        dogs_list_player.append(f.readline().splitlines())
        print(dogs_list_player)
    for i in range(Z):
        dogs_list_computer.append(f.readline().splitlines())
        print(dogs_list_computer)
The result is:
[['Tim']]
[['Tim'], ['Bob']]
[['Tim'], ['Bob'], ['Jess']]
[['Tim'], ['Bob'], ['Jess'], ['Bess']]
[['Tim'], ['Bob'], ['Jess'], ['Bess'], ['Tess']]
[['Dom']]
[['Dom'], ['Tom']]
[['Dom'], ['Tom'], ['Will']]
[['Dom'], ['Tom'], ['Will'], ['Ben']]
[['Dom'], ['Tom'], ['Will'], ['Ben'], ['Joe']]
But the issue arises when I add this part of the code:
dogs_list_player = []
dogs_list_computer = []
with open("dogs.txt") as f:
    for i in range(Y):
        dogs_list_player.append(f.readline().splitlines())
        print(dogs_list_player)
    for i in range(Z):
        dogs_list_computer.append(f.readline().splitlines())
        print(dogs_list_computer)

class Dog_card:
    name = ""
    friendliness = ""
    intelligence = ""
    exercise = ""
    drool = ""

    def printing_card(self):
        prnt_str = "Name:%s \nIntelligence:%s \nExercise:%s \nDrool:%s" %(self.name, self.friendliness, self.intelligence, self.exercise, self.drool)
        return prnt_str

player_card = Dog_card()
card_count = 0
player.name = dogs_list_player[0]  # I think this is where the issue is happening
The result of this code is:
IndexError: list index out of range
Any help would be appreciated
Your Dog_card class has some errors:
you forgot the "friendliness" parameter in the string inside the printing_card method, and you also don't have a constructor in your class.
class Dog_card:
    def __init__(self):
        self.name = ""
        self.friendliness = ""
        self.intelligence = ""
        self.exercise = ""
        self.drool = ""

    def printing_card(self):
        prnt_str = "Name:%s \nFriendliness:%s \nIntelligence:%s \nExercise:%s \nDrool:%s" % (self.name, self.friendliness, self.intelligence, self.exercise, self.drool)
        return prnt_str
This should work.
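A quick usage sketch of the fixed class (the attribute values are hypothetical). Note that, per the output printed above, each entry of dogs_list_player is itself a one-element list such as ['Tim'], so the name string is dogs_list_player[0][0]:

player_card = Dog_card()
player_card.name = dogs_list_player[0][0]   # "Tim"; dogs_list_player[0] alone is the list ['Tim']
player_card.friendliness = "7"
player_card.intelligence = "9"
player_card.exercise = "5"
player_card.drool = "2"
print(player_card.printing_card())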

Run Python script to replace betacode with Greek letters in LaTeX

I want to convert the betacode in an existing .tex file to normal Greek letters.
For example, I want to replace:
\bcode{lo/gos}
with simply:
λόγος
And so on for all other glyphs. Fortunately, there seems to be a Python script that is supposed to do just that. But, being completely inexperienced, I simply don't know how to run it.
Here is the code of the Python script:
# beta2unicode.py
#
# Version 2004-11-23
#
# James Tauber
# http://jtauber.com/
#
# You are free to redistribute this, but please inform me of any errors
#
# USAGE:
#
# trie = beta2unicodeTrie()
# beta = "LO/GOS\n";
# unicode, remainder = trie.convert(beta)
#
# - to get final sigma, string must end in \n
# - remainder will contain rest of beta if not all can be converted
class Trie:
def __init__(self):
self.root = [None, {}]
def add(self, key, value):
curr_node = self.root
for ch in key:
curr_node = curr_node[1].setdefault(ch, [None, {}])
curr_node[0] = value
def find(self, key):
curr_node = self.root
for ch in key:
try:
curr_node = curr_node[1][ch]
except KeyError:
return None
return curr_node[0]
def findp(self, key):
curr_node = self.root
remainder = key
for ch in key:
try:
curr_node = curr_node[1][ch]
except KeyError:
return (curr_node[0], remainder)
remainder = remainder[1:]
return (curr_node[0], remainder)
def convert(self, keystring):
valuestring = ""
key = keystring
while key:
value, key = self.findp(key)
if not value:
return (valuestring, key)
valuestring += value
return (valuestring, key)
def beta2unicodeTrie():
t = Trie()
t.add("*A", u"\u0391")
t.add("*B", u"\u0392")
t.add("*G", u"\u0393")
t.add("*D", u"\u0394")
t.add("*E", u"\u0395")
t.add("*Z", u"\u0396")
t.add("*H", u"\u0397")
t.add("*Q", u"\u0398")
t.add("*I", u"\u0399")
t.add("*K", u"\u039A")
t.add("*L", u"\u039B")
t.add("*M", u"\u039C")
t.add("*N", u"\u039D")
t.add("*C", u"\u039E")
t.add("*O", u"\u039F")
t.add("*P", u"\u03A0")
t.add("*R", u"\u03A1")
t.add("*S", u"\u03A3")
t.add("*T", u"\u03A4")
t.add("*U", u"\u03A5")
t.add("*F", u"\u03A6")
t.add("*X", u"\u03A7")
t.add("*Y", u"\u03A8")
t.add("*W", u"\u03A9")
t.add("A", u"\u03B1")
t.add("B", u"\u03B2")
t.add("G", u"\u03B3")
t.add("D", u"\u03B4")
t.add("E", u"\u03B5")
t.add("Z", u"\u03B6")
t.add("H", u"\u03B7")
t.add("Q", u"\u03B8")
t.add("I", u"\u03B9")
t.add("K", u"\u03BA")
t.add("L", u"\u03BB")
t.add("M", u"\u03BC")
t.add("N", u"\u03BD")
t.add("C", u"\u03BE")
t.add("O", u"\u03BF")
t.add("P", u"\u03C0")
t.add("R", u"\u03C1")
t.add("S\n", u"\u03C2")
t.add("S,", u"\u03C2,")
t.add("S.", u"\u03C2.")
t.add("S:", u"\u03C2:")
t.add("S;", u"\u03C2;")
t.add("S]", u"\u03C2]")
t.add("S#", u"\u03C2#")
t.add("S_", u"\u03C2_")
t.add("S", u"\u03C3")
t.add("T", u"\u03C4")
t.add("U", u"\u03C5")
t.add("F", u"\u03C6")
t.add("X", u"\u03C7")
t.add("Y", u"\u03C8")
t.add("W", u"\u03C9")
t.add("I+", U"\u03CA")
t.add("U+", U"\u03CB")
t.add("A)", u"\u1F00")
t.add("A(", u"\u1F01")
t.add("A)\\", u"\u1F02")
t.add("A(\\", u"\u1F03")
t.add("A)/", u"\u1F04")
t.add("A(/", u"\u1F05")
t.add("E)", u"\u1F10")
t.add("E(", u"\u1F11")
t.add("E)\\", u"\u1F12")
t.add("E(\\", u"\u1F13")
t.add("E)/", u"\u1F14")
t.add("E(/", u"\u1F15")
t.add("H)", u"\u1F20")
t.add("H(", u"\u1F21")
t.add("H)\\", u"\u1F22")
t.add("H(\\", u"\u1F23")
t.add("H)/", u"\u1F24")
t.add("H(/", u"\u1F25")
t.add("I)", u"\u1F30")
t.add("I(", u"\u1F31")
t.add("I)\\", u"\u1F32")
t.add("I(\\", u"\u1F33")
t.add("I)/", u"\u1F34")
t.add("I(/", u"\u1F35")
t.add("O)", u"\u1F40")
t.add("O(", u"\u1F41")
t.add("O)\\", u"\u1F42")
t.add("O(\\", u"\u1F43")
t.add("O)/", u"\u1F44")
t.add("O(/", u"\u1F45")
t.add("U)", u"\u1F50")
t.add("U(", u"\u1F51")
t.add("U)\\", u"\u1F52")
t.add("U(\\", u"\u1F53")
t.add("U)/", u"\u1F54")
t.add("U(/", u"\u1F55")
t.add("W)", u"\u1F60")
t.add("W(", u"\u1F61")
t.add("W)\\", u"\u1F62")
t.add("W(\\", u"\u1F63")
t.add("W)/", u"\u1F64")
t.add("W(/", u"\u1F65")
t.add("A)=", u"\u1F06")
t.add("A(=", u"\u1F07")
t.add("H)=", u"\u1F26")
t.add("H(=", u"\u1F27")
t.add("I)=", u"\u1F36")
t.add("I(=", u"\u1F37")
t.add("U)=", u"\u1F56")
t.add("U(=", u"\u1F57")
t.add("W)=", u"\u1F66")
t.add("W(=", u"\u1F67")
t.add("*A)", u"\u1F08")
t.add("*)A", u"\u1F08")
t.add("*A(", u"\u1F09")
t.add("*(A", u"\u1F09")
#
t.add("*(\A", u"\u1F0B")
t.add("*A)/", u"\u1F0C")
t.add("*)/A", u"\u1F0C")
t.add("*A(/", u"\u1F0F")
t.add("*(/A", u"\u1F0F")
t.add("*E)", u"\u1F18")
t.add("*)E", u"\u1F18")
t.add("*E(", u"\u1F19")
t.add("*(E", u"\u1F19")
#
t.add("*(\E", u"\u1F1B")
t.add("*E)/", u"\u1F1C")
t.add("*)/E", u"\u1F1C")
t.add("*E(/", u"\u1F1D")
t.add("*(/E", u"\u1F1D")
t.add("*H)", u"\u1F28")
t.add("*)H", u"\u1F28")
t.add("*H(", u"\u1F29")
t.add("*(H", u"\u1F29")
t.add("*H)\\", u"\u1F2A")
t.add(")\\*H", u"\u1F2A")
t.add("*)\\H", u"\u1F2A")
#
t.add("*H)/", u"\u1F2C")
t.add("*)/H", u"\u1F2C")
#
t.add("*)=H", u"\u1F2E")
t.add("(/*H", u"\u1F2F")
t.add("*(/H", u"\u1F2F")
t.add("*I)", u"\u1F38")
t.add("*)I", u"\u1F38")
t.add("*I(", u"\u1F39")
t.add("*(I", u"\u1F39")
#
#
t.add("*I)/", u"\u1F3C")
t.add("*)/I", u"\u1F3C")
#
#
t.add("*I(/", u"\u1F3F")
t.add("*(/I", u"\u1F3F")
#
t.add("*O)", u"\u1F48")
t.add("*)O", u"\u1F48")
t.add("*O(", u"\u1F49")
t.add("*(O", u"\u1F49")
#
#
t.add("*(\O", u"\u1F4B")
t.add("*O)/", u"\u1F4C")
t.add("*)/O", u"\u1F4C")
t.add("*O(/", u"\u1F4F")
t.add("*(/O", u"\u1F4F")
#
t.add("*U(", u"\u1F59")
t.add("*(U", u"\u1F59")
#
t.add("*(/U", u"\u1F5D")
#
t.add("*(=U", u"\u1F5F")
t.add("*W)", u"\u1F68")
t.add("*W(", u"\u1F69")
t.add("*(W", u"\u1F69")
#
#
t.add("*W)/", u"\u1F6C")
t.add("*)/W", u"\u1F6C")
t.add("*W(/", u"\u1F6F")
t.add("*(/W", u"\u1F6F")
t.add("*A)=", u"\u1F0E")
t.add("*)=A", u"\u1F0E")
t.add("*A(=", u"\u1F0F")
t.add("*W)=", u"\u1F6E")
t.add("*)=W", u"\u1F6E")
t.add("*W(=", u"\u1F6F")
t.add("*(=W", u"\u1F6F")
t.add("A\\", u"\u1F70")
t.add("A/", u"\u1F71")
t.add("E\\", u"\u1F72")
t.add("E/", u"\u1F73")
t.add("H\\", u"\u1F74")
t.add("H/", u"\u1F75")
t.add("I\\", u"\u1F76")
t.add("I/", u"\u1F77")
t.add("O\\", u"\u1F78")
t.add("O/", u"\u1F79")
t.add("U\\", u"\u1F7A")
t.add("U/", u"\u1F7B")
t.add("W\\", u"\u1F7C")
t.add("W/", u"\u1F7D")
t.add("A)/|", u"\u1F84")
t.add("A(/|", u"\u1F85")
t.add("H)|", u"\u1F90")
t.add("H(|", u"\u1F91")
t.add("H)/|", u"\u1F94")
t.add("H)=|", u"\u1F96")
t.add("H(=|", u"\u1F97")
t.add("W)|", u"\u1FA0")
t.add("W(=|", u"\u1FA7")
t.add("A=", u"\u1FB6")
t.add("H=", u"\u1FC6")
t.add("I=", u"\u1FD6")
t.add("U=", u"\u1FE6")
t.add("W=", u"\u1FF6")
t.add("I\\+", u"\u1FD2")
t.add("I/+", u"\u1FD3")
t.add("I+/", u"\u1FD3")
t.add("U\\+", u"\u1FE2")
t.add("U/+", u"\u1FE3")
t.add("A|", u"\u1FB3")
t.add("A/|", u"\u1FB4")
t.add("H|", u"\u1FC3")
t.add("H/|", u"\u1FC4")
t.add("W|", u"\u1FF3")
t.add("W|/", u"\u1FF4")
t.add("W/|", u"\u1FF4")
t.add("A=|", u"\u1FB7")
t.add("H=|", u"\u1FC7")
t.add("W=|", u"\u1FF7")
t.add("R(", u"\u1FE4")
t.add("*R(", u"\u1FEC")
t.add("*(R", u"\u1FEC")
# t.add("~", u"~")
# t.add("-", u"-")
# t.add("(null)", u"(null)")
# t.add("&", "&")
t.add("0", u"0")
t.add("1", u"1")
t.add("2", u"2")
t.add("3", u"3")
t.add("4", u"4")
t.add("5", u"5")
t.add("6", u"6")
t.add("7", u"7")
t.add("8", u"8")
t.add("9", u"9")
t.add("#", u"#")
t.add("$", u"$")
t.add(" ", u" ")
t.add(".", u".")
t.add(",", u",")
t.add("'", u"'")
t.add(":", u":")
t.add(";", u";")
t.add("_", u"_")
t.add("[", u"[")
t.add("]", u"]")
t.add("\n", u"")
return t
t = beta2unicodeTrie()
import sys
for line in file(sys.argv[1]):
a, b = t.convert(line)
if b:
print a.encode("utf-8"), b
raise Exception
print a.encode("utf-8")
And here is a little .tex file with which it should work.
\documentclass[12pt]{scrbook}
\usepackage[polutonikogreek, ngerman]{babel}
\usepackage[ngerman]{betababel}
\usepackage{fontspec}
%\defaultfontfeatures{Ligatures=TeX}
%\newfontfeature{Microtype}{protrusion=default;expansion=default;}
\begin{document}
\bcode{lo/gos}
\end{document}
In case the script does not work: would it be possible to convert all the strings within the \bcode macro with something like regex? For example, the "o/" to ό, and so on? What would be the weapon of choice here?
Do I have Python installed?
Try python -V at a shell prompt. Your code is Python 2 code, so you will need a Python 2 version.
I need to install Python
The most straightforward way, if you don't need a complex environment (and you don't for this problem), is just to go to python.org. Don't forget you need Python 2.
Running the program
Generally it will be as simple as:
python beta2unicode.py myfile.tex-file
And to capture the output:
python beta2unicode.py myfile.tex-file > myfile.not-tex-file
Does the script work?
Almost. You will need to replace the code at the end of the script that starts the same way this does, with this:
import sys
t = beta2unicodeTrie()
import re
BCODE = re.compile(r'\\bcode{[^}]*}')

for line in open(sys.argv[1]):
    matches = BCODE.search(line)
    for match in BCODE.findall(line):
        bcode = match[7:-1]
        a, b = t.convert(bcode.upper())
        if b:
            raise IOError("failed conversion '%s' in '%s'" % (b, line))
        converted = a.encode("utf-8")
        line = line.replace(match, converted)
    print(line.rstrip())
Results
\documentclass[12pt]{scrbook}
\usepackage[polutonikogreek, ngerman]{babel}
\usepackage[ngerman]{betababel}
\usepackage{fontspec}
%\defaultfontfeatures{Ligatures=TeX}
%\newfontfeature{Microtype}{protrusion=default;expansion=default;}
\begin{document}
λόγοσ
\end{document}
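One small footnote, based on the USAGE comment at the top of beta2unicode.py itself: the trie only produces the final sigma (ς) when the string being converted ends in "\n". If converted words ever come out ending in σ instead of ς, a one-line tweak to the loop above should fix it (this is my assumption, not part of the original answer):

a, b = t.convert(bcode.upper() + "\n")   # assumed tweak: trailing "\n" yields the final sigma per the script's USAGE note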

Python unified diff with line numbers from both "files"

I'm trying to figure out a way to create unified diffs with line numbers, showing only N lines of context. I have been unable to do this with difflib.unified_diff. I need to show changes in both files.
The closest I can come is using diff on the command line like so:
/usr/bin/diff
--unchanged-line-format=' %.2dn %L'
--old-line-format="-%.2dn %L"
--new-line-format="+%.2dn %L"
file1.py
file2.py
But I only want to show N lines of context, and /usr/bin/diff doesn't seem to support context together with a custom line format (e.g. -U2 combined with --line-format is rejected with "conflicting output style options").
Below is an example of what I'd like to accomplish (the same output as the above diff, but only showing 1 line of context surrounding changes):
+01: def renamed_function()
-01: def original_function():
02:
+03: """ Neat stuff here """
04:
21:
+22: # Here's a new comment
23:
85: # Output the value of foo()
+86: print "Foo is %s"%(foo())
-86: print foo()
87:
I was able to figure out something very close to what I wanted to do. It's slower than regular diff, though. Here's the entire code, from my project GitGate.
import os
import re
import difflib

def unified_diff(to_file_path, from_file_path, context=1):
    """ Returns a list of differences between two files based
    on some context. This is probably over-complicated. """
    # Matches difflib's hunk headers, e.g. "@@ -1,5 +3,5 @@" (the count may be omitted for 1-line hunks)
    pat_diff = re.compile(r'@@ (-[0-9]+(?:,[0-9]+)?) (\+[0-9]+(?:,[0-9]+)?) @@')
    from_lines = []
    if os.path.exists(from_file_path):
        from_fh = open(from_file_path, 'r')
        from_lines = from_fh.readlines()
        from_fh.close()
    to_lines = []
    if os.path.exists(to_file_path):
        to_fh = open(to_file_path, 'r')
        to_lines = to_fh.readlines()
        to_fh.close()
    diff_lines = []
    lines = difflib.unified_diff(to_lines, from_lines, n=context)
    for line in lines:
        if line.startswith('--') or line.startswith('++'):
            continue
        m = pat_diff.match(line)
        if m:
            left = m.group(1)
            right = m.group(2)
            lstart = left.split(',')[0][1:]
            rstart = right.split(',')[0][1:]
            diff_lines.append("## %s %s ##\n" % (left, right))
            to_lnum = int(lstart)
            from_lnum = int(rstart)
            continue
        code = line[0]
        lnum = from_lnum
        if code == '-':
            lnum = to_lnum
        diff_lines.append("%s%.4d: %s" % (code, lnum, line[1:]))
        if code == '-':
            to_lnum += 1
        elif code == '+':
            from_lnum += 1
        else:
            to_lnum += 1
            from_lnum += 1
    return diff_lines
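A minimal usage sketch (the file names are hypothetical). Each returned entry is already a formatted line, so the result can simply be printed or written out; note that, the way the function feeds difflib, the first path's lines are the ones that end up with a '-' prefix:

if __name__ == "__main__":
    # Hypothetical file names; any two text files will do.
    for diff_line in unified_diff("file1.py", "file2.py", context=1):
        print(diff_line, end="")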

Two issues about the Python OpenOPC library

Issue description and environments
The OpenOPC library is friendly and easy to use, and the API is simple too, but I have found two issues during the development of a tool to record real-time OPC item data.
The development environment is: Windows 8.1, Python 2.7.6, wxPython 2.8 unicode
The testing environment is: Windows XP SP3, Python 2.7.6, wxPython 2.8 unicode, Rockwell's SoftLogix as OPC server
The deploy environment is: Windows XP SP3, connected to Rockwell's real PLC, with RSLogix 5000 and RSLinx Classic Gateway installed
Questions
The opc.list function doesn't list all the items of the specified node, in both the testing and workstation environments. The question is: how do I list 't' from the OPC server?
An int array 'dint100' and a DINT 't' were added with RSLogix 5000 in the scope of soft_1.
The default OPC client test tool from Rockwell can list the newly added 't'.
With the OpenOPC library I couldn't find out how to list the item 't', but I could read its value by its tag with opc.read('[soft_1]t').
If 't' could be listed, it could be added to the IO tree of my tool.
The opc.servers function encounters an OPCError in the deploy environment, but the client can connect to the 'RSLinx OPC Server' directly by server name. Does the opc.servers function depend on some special DLL or service?
Any suggestions will be appreciated! Thanks in advance!
Consider that the browsing problems ("opc.list") may not be on your side. RSLinx is notorious for its broken OPC browsing. Try some test/simulation server from a different vendor, to test this hypothesis.
I realize that I'm really late to this game. I found what was causing this issue. OpenOPC.py assumes that there cannot be both a "Leaf" and a "Branch" on the same level. Replace the function ilist with this:
def ilist(self, paths='*', recursive=False, flat=False, include_type=False):
"""Iterable version of list()"""
try:
self._update_tx_time()
pythoncom.CoInitialize()
try:
browser = self._opc.CreateBrowser()
# For OPC servers that don't support browsing
except:
return
paths, single, valid = type_check(paths)
if not valid:
raise TypeError("list(): 'paths' parameter must be a string or a list of strings")
if len(paths) == 0: paths = ['*']
nodes = {}
for path in paths:
if flat:
browser.MoveToRoot()
browser.Filter = ''
browser.ShowLeafs(True)
pattern = re.compile('^%s$' % wild2regex(path) , re.IGNORECASE)
matches = filter(pattern.search, browser)
if include_type: matches = [(x, node_type) for x in matches]
for node in matches: yield node
continue
queue = []
queue.append(path)
while len(queue) > 0:
tag = queue.pop(0)
browser.MoveToRoot()
browser.Filter = ''
pattern = None
path_str = '/'
path_list = tag.replace('.','/').split('/')
path_list = [p for p in path_list if len(p) > 0]
found_filter = False
path_postfix = '/'
for i, p in enumerate(path_list):
if found_filter:
path_postfix += p + '/'
elif p.find('*') >= 0:
pattern = re.compile('^%s$' % wild2regex(p) , re.IGNORECASE)
found_filter = True
elif len(p) != 0:
pattern = re.compile('^.*$')
browser.ShowBranches()
# Branch node, so move down
if len(browser) > 0:
try:
browser.MoveDown(p)
path_str += p + '/'
except:
if i < len(path_list)-1: return
pattern = re.compile('^%s$' % wild2regex(p) , re.IGNORECASE)
# Leaf node, so append all remaining path parts together
# to form a single search expression
else:
###################################### JG Edit - Flip the next two rows comment/uncommented
p = '.'.join(path_list[i:])
# p = string.join(path_list[i:], '.')
pattern = re.compile('^%s$' % wild2regex(p) , re.IGNORECASE)
break
###################################### JG Edit - Comment this to return to original
browser.ShowBranches()
node_types = ['Branch','Leaf']
if len(browser) == 0:
lowest_level = True
node_types.pop(0)
else:
lowest_level = False
for node_type in node_types:
if node_type=='Leaf':
browser.ShowLeafs(False)
matches = filter(pattern.search, browser)
if not lowest_level and recursive:
queue += [path_str + x + path_postfix for x in matches]
else:
###################################### JG Edit - Flip the next two rows comment/uncommented
if lowest_level or node_type=='Leaf': matches = [exceptional(browser.GetItemID,x)(x) for x in matches]
# if lowest_level: matches = [exceptional(browser.GetItemID,x)(x) for x in matches]
if include_type: matches = [(x, node_type) for x in matches]
for node in matches:
if not node in nodes: yield node
nodes[node] = True
###################################### Uncomment this to return to original
# browser.ShowBranches()
# if len(browser) == 0:
# browser.ShowLeafs(False)
# lowest_level = True
# node_type = 'Leaf'
# else:
# lowest_level = False
# node_type = 'Branch'
# matches = filter(pattern.search, browser)
# if not lowest_level and recursive:
# queue += [path_str + x + path_postfix for x in matches]
# else:
# if lowest_level: matches = [exceptional(browser.GetItemID,x)(x) for x in matches]
# if include_type: matches = [(x, node_type) for x in matches]
# for node in matches:
# if not node in nodes: yield node
# nodes[node] = True
except pythoncom.com_error as err:
error_msg = 'list: %s' % self._get_error_str(err)
raise OPCError(error_msg)
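For reference, here is a small usage sketch of how browsing is typically driven once the patched ilist is in place. The server and tag names come from the question; treat the exact call arguments as an assumption based on the list()/ilist() parameters shown above:

import OpenOPC

opc = OpenOPC.client()
opc.connect('RSLinx OPC Server')   # server name from the question
# list() wraps ilist(); include_type=True also yields 'Branch'/'Leaf' for each node
for name, node_type in opc.list('[soft_1]*', include_type=True):
    print(name, node_type)
print(opc.read('[soft_1]t'))       # reading by tag, as in the question
opc.close()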
