Commit 19cbeb3

added temp diag class UML
HadrienCazes committed May 7, 2020
1 parent 3085237 commit 19cbeb3
Showing 8 changed files with 10 additions and 9 deletions.
Binary file added diagrammeClasseTemporaire.pdf
6 changes: 3 additions & 3 deletions parser/expressionsFactory.py

@@ -1,6 +1,6 @@
-import "../tokenizer/constants" as constTokens
-import "./constants" as constParser
-import "./helper" as helper
+from tokenizer import constants as constTokens
+from parser import constants as constParser
+from parser import helper as helper
 
 def create(tokenType,tokens,start):
     if tokenType == constParser.expressionMethodCall:
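
The three parser modules all make the same fix: the old JS-style string imports are not valid Python at all, so they are replaced with standard absolute imports. A minimal sketch of the before/after, assuming the scripts are run from the repository root so that the tokenizer and parser packages are importable:

# The old style is rejected by CPython before any code runs:
#     import "../tokenizer/constants" as constTokens   # SyntaxError
# The replacement is an ordinary aliased import, which resolves as long
# as the repository root is on sys.path (an assumption about how the
# project is launched):
from tokenizer import constants as constTokens
from parser import constants as constParser
from parser import helper as helper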
4 changes: 2 additions & 2 deletions parser/helper.py

@@ -1,5 +1,5 @@
-import "../tokenizer/constants" as constTokens
-import "./constants" as constParser
+from tokenizer import constants as constTokens
+from parser import constants as constParser
 
 def searchString(tokens, start):
     string=[]
6 changes: 3 additions & 3 deletions parser/parser.py

@@ -1,6 +1,6 @@
-import "../tokenizer/constants" as constTokens
-import "./constants" as constParser
-import "./expressionsFactory" as factory
+from tokenizer import constants as constTokens
+from parser import constants as constParser
+from parser import expressionsFactory as factory
 
 def parser(tokens):
     AST=[]
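
With all three modules importing the same way, the pipeline can be driven from a single script. A hedged usage sketch; the call signatures are inferred from the definitions shown in these diffs (tokenizer() takes no arguments, parser() takes the token list) and are assumptions, not documented API:

# Hypothetical driver script at the repository root:
from tokenizer import tokenizer
from parser import parser

tokens = tokenizer.tokenizer()   # reads and splits the source itself
ast = parser.parser(tokens)      # builds the AST list from the tokens
print(ast)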
Binary file modified tokenizer/__pycache__/constants.cpython-38.pyc
Binary file modified tokenizer/__pycache__/tokenizer.cpython-38.pyc
1 change: 1 addition & 0 deletions tokenizer/constants.py

@@ -14,5 +14,6 @@
     "divide": {"regRule": "/", "value":"/"},
     "modulo": {"regRule": "%", "value":"%"},
     "printFunction": {"regRule": "print", "value":"print"},
+    "defFunction" : {"regRule": "def", "value":"def"},
     "#": {"regRule": "#", "value":"#"}
 }
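
The new defFunction entry follows the table's existing shape: a regRule pattern and the value the token will carry. A small self-contained sketch of how such a table can drive token classification; the classify helper is illustrative, not the repository's actual code:

import re

# Subset of the tokenizer/constants.py table, copied from the diff above:
TOKENS = {
    "printFunction": {"regRule": "print", "value": "print"},
    "defFunction": {"regRule": "def", "value": "def"},
}

def classify(word):
    # Return the first entry whose regRule matches the whole word.
    for name, rule in TOKENS.items():
        if re.fullmatch(rule["regRule"], word):
            return name, rule["value"]
    return None

print(classify("def"))    # ('defFunction', 'def')
print(classify("while"))  # None -- no rule for it yet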
2 changes: 1 addition & 1 deletion tokenizer/tokenizer.py

@@ -19,7 +19,7 @@ def tokenizer():
     new_source = helper.replaceSpecialsChars(source)
     token_list = []
     new_source2 = re.split("[\t\f\v ]+", new_source)
-    print(new_source2)
+    #print(new_source2)
     for word in new_source2:
         if not word.isdigit():
             #if len(word)<=0 or word==None:
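
The only change here silences a debug print of new_source2, the list produced by splitting the preprocessed source on runs of horizontal whitespace. What that print would have shown, as a standalone snippet:

import re

# Same split as in tokenizer(): tabs, form feeds, vertical tabs and
# spaces all collapse into one delimiter; newlines are untouched.
source = "print  1 +\t2"
print(re.split("[\t\f\v ]+", source))   # ['print', '1', '+', '2']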
