Skip to content

Commit 7fa2ad8

Browse files
committed
fix errors raised from rebasing/merging versions branch
1 parent f17fd5f commit 7fa2ad8

File tree

4 files changed

+61
-62
lines changed

4 files changed

+61
-62
lines changed

pydifact/token.py

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -23,24 +23,23 @@
2323
from enum import Enum
2424

2525

26-
class TokenType(Enum):
27-
CTRL_CHARS = 10 # ASCII string holding the control chars
28-
CONTENT = 11
29-
COMPONENT_SEPARATOR = 12 # default :
30-
DATA_SEPARATOR = 13 # default +
31-
TERMINATOR = 14 # default '
32-
33-
3426
class Token:
3527
"""Represents a block of characters in the message.
3628
3729
This could be content, a data separator (usually +),
3830
a component data separator (usually :), or a segment terminator (usually ').
3931
"""
4032

41-
def __init__(self, token_type: TokenType, value: str):
33+
class Type(Enum):
34+
CTRL_CHARS = 10 # ASCII string holding the control chars
35+
CONTENT = 11
36+
COMPONENT_SEPARATOR = 12 # default :
37+
DATA_SEPARATOR = 13 # default +
38+
TERMINATOR = 14 # default '
39+
40+
def __init__(self, token_type: Type, value: str):
4241
"""Creates a Token with a type and a value"""
43-
if not isinstance(token_type, TokenType):
42+
if not isinstance(token_type, self.Type):
4443
raise TypeError(f"Invalid token type: {token_type}")
4544

4645
self.type = token_type

tests/test_parser.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
#
1414
# You should have received a copy of the GNU Lesser General Public License
1515
# along with this program. If not, see <http://www.gnu.org/licenses/>.
16-
from pydifact.exceptions import EdifactSyntaxError
16+
from pydifact.exceptions import EDISyntaxError
1717
from pydifact.parser import Parser
1818
from pydifact.segments import Segment
1919
from pydifact.control.characters import Characters
@@ -264,7 +264,7 @@ def test_parsing_with_passed_characters_but_respect_una():
264264

265265

266266
def test_message_end_without_control_char():
267-
with pytest.raises(EdifactSyntaxError):
267+
with pytest.raises(EDISyntaxError):
268268
# must raise an EDISyntaxError as the string terminates abruptly within a segment
269269
for c in Parser().parse("UNB+IBMA:1+BLUBB A+FOO X+950"):
270270
pass

tests/test_token.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,16 +15,16 @@
1515
# along with this program. If not, see <http://www.gnu.org/licenses/>.
1616
import pytest
1717

18-
from pydifact.token import Token, TokenType
18+
from pydifact.token import Token
1919

2020

2121
def test_type():
22-
token = Token(TokenType.CONTENT, "ok")
23-
assert TokenType.CONTENT == token.type
22+
token = Token(Token.Type.CONTENT, "ok")
23+
assert Token.Type.CONTENT == token.type
2424

2525

2626
def test_value():
27-
token = Token(TokenType.CONTENT, "ok")
27+
token = Token(Token.Type.CONTENT, "ok")
2828
assert token.value == "ok"
2929

3030

tests/test_tokenizer.py

Lines changed: 46 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,8 @@
1515
# along with this program. If not, see <http://www.gnu.org/licenses/>.
1616
import pytest
1717

18-
from pydifact.exceptions import EdifactSyntaxError
19-
from pydifact.token import Token, TokenType
18+
from pydifact.exceptions import EDISyntaxError
19+
from pydifact.token import Token
2020
from pydifact.tokenizer import Tokenizer
2121

2222
from pydifact.control import Characters
@@ -47,12 +47,12 @@ def test_basic():
4747
_assert_tokens(
4848
"RFF+PD:50515'",
4949
[
50-
Token(TokenType.CONTENT, "RFF"),
51-
Token(TokenType.DATA_SEPARATOR, "+"),
52-
Token(TokenType.CONTENT, "PD"),
53-
Token(TokenType.COMPONENT_SEPARATOR, ":"),
54-
Token(TokenType.CONTENT, "50515"),
55-
Token(TokenType.TERMINATOR, "'"),
50+
Token(Token.Type.CONTENT, "RFF"),
51+
Token(Token.Type.DATA_SEPARATOR, "+"),
52+
Token(Token.Type.CONTENT, "PD"),
53+
Token(Token.Type.COMPONENT_SEPARATOR, ":"),
54+
Token(Token.Type.CONTENT, "50515"),
55+
Token(Token.Type.TERMINATOR, "'"),
5656
],
5757
)
5858

@@ -61,10 +61,10 @@ def test_escape():
6161
_assert_tokens(
6262
"RFF+PD?:5'",
6363
[
64-
Token(TokenType.CONTENT, "RFF"),
65-
Token(TokenType.DATA_SEPARATOR, "+"),
66-
Token(TokenType.CONTENT, "PD:5"),
67-
Token(TokenType.TERMINATOR, "'"),
64+
Token(Token.Type.CONTENT, "RFF"),
65+
Token(Token.Type.DATA_SEPARATOR, "+"),
66+
Token(Token.Type.CONTENT, "PD:5"),
67+
Token(Token.Type.TERMINATOR, "'"),
6868
],
6969
)
7070

@@ -73,12 +73,12 @@ def test_double_escape():
7373
_assert_tokens(
7474
"RFF+PD??:5'",
7575
[
76-
Token(TokenType.CONTENT, "RFF"),
77-
Token(TokenType.DATA_SEPARATOR, "+"),
78-
Token(TokenType.CONTENT, "PD?"),
79-
Token(TokenType.COMPONENT_SEPARATOR, ":"),
80-
Token(TokenType.CONTENT, "5"),
81-
Token(TokenType.TERMINATOR, "'"),
76+
Token(Token.Type.CONTENT, "RFF"),
77+
Token(Token.Type.DATA_SEPARATOR, "+"),
78+
Token(Token.Type.CONTENT, "PD?"),
79+
Token(Token.Type.COMPONENT_SEPARATOR, ":"),
80+
Token(Token.Type.CONTENT, "5"),
81+
Token(Token.Type.TERMINATOR, "'"),
8282
],
8383
)
8484

@@ -87,10 +87,10 @@ def test_triple_escape():
8787
_assert_tokens(
8888
"RFF+PD???:5'",
8989
[
90-
Token(TokenType.CONTENT, "RFF"),
91-
Token(TokenType.DATA_SEPARATOR, "+"),
92-
Token(TokenType.CONTENT, "PD?:5"),
93-
Token(TokenType.TERMINATOR, "'"),
90+
Token(Token.Type.CONTENT, "RFF"),
91+
Token(Token.Type.DATA_SEPARATOR, "+"),
92+
Token(Token.Type.CONTENT, "PD?:5"),
93+
Token(Token.Type.TERMINATOR, "'"),
9494
],
9595
)
9696

@@ -99,12 +99,12 @@ def test_quadruple_escape():
9999
_assert_tokens(
100100
"RFF+PD????:5'",
101101
[
102-
Token(TokenType.CONTENT, "RFF"),
103-
Token(TokenType.DATA_SEPARATOR, "+"),
104-
Token(TokenType.CONTENT, "PD??"),
105-
Token(TokenType.COMPONENT_SEPARATOR, ":"),
106-
Token(TokenType.CONTENT, "5"),
107-
Token(TokenType.TERMINATOR, "'"),
102+
Token(Token.Type.CONTENT, "RFF"),
103+
Token(Token.Type.DATA_SEPARATOR, "+"),
104+
Token(Token.Type.CONTENT, "PD??"),
105+
Token(Token.Type.COMPONENT_SEPARATOR, ":"),
106+
Token(Token.Type.CONTENT, "5"),
107+
Token(Token.Type.TERMINATOR, "'"),
108108
],
109109
)
110110

@@ -113,10 +113,10 @@ def test_starts_with_escape():
113113
_assert_tokens(
114114
"DTM+?+0'",
115115
[
116-
Token(TokenType.CONTENT, "DTM"),
117-
Token(TokenType.DATA_SEPARATOR, "+"),
118-
Token(TokenType.CONTENT, "+0"),
119-
Token(TokenType.TERMINATOR, "'"),
116+
Token(Token.Type.CONTENT, "DTM"),
117+
Token(Token.Type.DATA_SEPARATOR, "+"),
118+
Token(Token.Type.CONTENT, "+0"),
119+
Token(Token.Type.TERMINATOR, "'"),
120120
],
121121
)
122122

@@ -127,14 +127,14 @@ def test_starts_with_escape():
127127
@pytest.fixture
128128
def expected_crlf():
129129
return [
130-
Token(TokenType.CONTENT, "RFF"),
131-
Token(TokenType.COMPONENT_SEPARATOR, ":"),
132-
Token(TokenType.CONTENT, "5"),
133-
Token(TokenType.TERMINATOR, "'"),
134-
Token(TokenType.CONTENT, "DEF"),
135-
Token(TokenType.COMPONENT_SEPARATOR, ":"),
136-
Token(TokenType.CONTENT, "6"),
137-
Token(TokenType.TERMINATOR, "'"),
130+
Token(Token.Type.CONTENT, "RFF"),
131+
Token(Token.Type.COMPONENT_SEPARATOR, ":"),
132+
Token(Token.Type.CONTENT, "5"),
133+
Token(Token.Type.TERMINATOR, "'"),
134+
Token(Token.Type.CONTENT, "DEF"),
135+
Token(Token.Type.COMPONENT_SEPARATOR, ":"),
136+
Token(Token.Type.CONTENT, "6"),
137+
Token(Token.Type.TERMINATOR, "'"),
138138
]
139139

140140

@@ -167,10 +167,10 @@ def test_ignore_long_whitespace(expected_crlf):
167167

168168

169169
def test_no_terminator():
170-
with pytest.raises(EdifactSyntaxError):
170+
with pytest.raises(EDISyntaxError):
171171
list(Tokenizer().get_tokens("TEST"))
172172

173-
with pytest.raises(EdifactSyntaxError) as excinfo:
173+
with pytest.raises(EDISyntaxError) as excinfo:
174174
list(
175175
Tokenizer().get_tokens(
176176
"UNB+IBMA:1+FACHARZT A+PRAKTIKER X+950402+1200+1'"
@@ -182,8 +182,8 @@ def test_no_terminator():
182182

183183

184184
def test_escaped_newline_char():
185-
with pytest.raises(EdifactSyntaxError) as excinfo:
186-
# must raise a EdifactSyntaxError as there is no newline after an escape char
185+
with pytest.raises(EDISyntaxError) as excinfo:
186+
# must raise an EDISyntaxError as there is no newline after an escape char
187187
# "?" allowed.
188188
list(
189189
Tokenizer().get_tokens(
@@ -195,8 +195,8 @@ def test_escaped_newline_char():
195195
assert "line 0, column 5" in str(excinfo.value)
196196

197197
# a "\n" must do the same as a real newline
198-
with pytest.raises(EdifactSyntaxError) as excinfo:
199-
# must raise a EdifactSyntaxError as there is no newline after an escape char
198+
with pytest.raises(EDISyntaxError) as excinfo:
199+
# must raise a EDISyntaxError as there is no newline after an escape char
200200
# "?" allowed.
201201
list(Tokenizer().get_tokens("UNB+?\nFOO'"))
202202
assert "Newlines after escape characters are not allowed." in str(excinfo.value)

0 commit comments

Comments
 (0)