
Commit dcb3c29

#774 - remove fuzzy flags
1 parent 8506de4 commit dcb3c29
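
As context for the change: a gettext `#, fuzzy` marker tells tooling that a translation needs review, and `msgfmt` skips such entries by default, so clearing the flag is what lets a reviewed msgstr ship in the compiled catalog. A minimal sketch of doing the same thing programmatically, assuming the third-party `polib` package (not used by this commit) and that every fuzzy entry in the file has already been reviewed:

```python
# Hypothetical helper, not part of this commit: clear "#, fuzzy" markers
# from a PO catalog using polib.
import polib

po = polib.pofile("library/tokenize.po")   # parse the catalog
for entry in po:
    if "fuzzy" in entry.flags:             # entries marked "#, fuzzy"
        entry.flags.remove("fuzzy")        # drop the marker, keep the msgstr
po.save()                                  # write the file back in place
```

In this commit the flags were removed by hand together with the corresponding msgstr updates, as shown in the diff below.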

File tree

1 file changed: +102 -17 lines changed

library/tokenize.po

Lines changed: 102 additions & 17 deletions
@@ -17,9 +17,8 @@ msgstr ""
 "Generated-By: Babel 2.17.0\n"
 
 #: ../../library/tokenize.rst:2
-#, fuzzy
 msgid ":mod:`!tokenize` --- Tokenizer for Python source"
-msgstr ":mod:`tokenize` --- 파이썬 소스를 위한 토크나이저"
+msgstr ":mod:`!tokenize` --- 파이썬 소스를 위한 토크나이저"
 
 #: ../../library/tokenize.rst:10
 msgid "**Source code:** :source:`Lib/tokenize.py`"
@@ -164,15 +163,14 @@ msgstr ""
 "반환해야 합니다. 추가 시퀀스 요소는 무시됩니다."
 
 #: ../../library/tokenize.rst:94
-#, fuzzy
 msgid ""
 "The result is guaranteed to tokenize back to match the input so that the "
 "conversion is lossless and round-trips are assured. The guarantee "
 "applies only to the token type and token string as the spacing between "
 "tokens (column positions) may change."
 msgstr ""
-"재구성된 스크립트는 단일 문자열로 반환됩니다. 결과는 다시 토큰화하면 입력과 일치함이 보장되어, 변환은 무손실이고 왕복이 "
-"보장됩니다. 보증은 토큰 유형과 토큰 문자열에만 적용되어, 토큰 간의 간격(열 위치)은 변경될 수 있습니다."
+"결과는 다시 토큰화하면 입력과 일치함이 보장되어, 변환은 무손실이고 왕복이 보장됩니다. 보증은 토큰 유형과 토큰 문자열에만 "
+"적용되어, 토큰 간의 간격(열 위치)은 변경될 수 있습니다."
 
 #: ../../library/tokenize.rst:99
 msgid ""
@@ -251,6 +249,8 @@ msgid ""
 "\"\"\"Beginning of\n"
 "docstring"
 msgstr ""
+"\"\"\"Beginning of\n"
+"docstring"
 
 #: ../../library/tokenize.rst:144
 msgid "or::"
@@ -262,6 +262,9 @@ msgid ""
 " 2,\n"
 " 3"
 msgstr ""
+"[1,\n"
+" 2,\n"
+" 3"
 
 #: ../../library/tokenize.rst:153
 msgid "Command-Line Usage"
@@ -275,7 +278,7 @@ msgstr ":mod:`tokenize` 모듈은 명령 줄에서 스크립트로 실행될 수
 
 #: ../../library/tokenize.rst:160
 msgid "python -m tokenize [-e] [filename.py]"
-msgstr ""
+msgstr "python -m tokenize [-e] [filename.py]"
 
 #: ../../library/tokenize.rst:164
 msgid "The following options are accepted:"
@@ -350,6 +353,42 @@ msgid ""
 "            result.append((toknum, tokval))\n"
 "    return untokenize(result).decode('utf-8')"
 msgstr ""
+"from tokenize import tokenize, untokenize, NUMBER, STRING, NAME, OP\n"
+"from io import BytesIO\n"
+"\n"
+"def decistmt(s):\n"
+"    \"\"\"문장 문자열에 있는 float를 Decimal로 치환합니다.\n"
+"\n"
+"    >>> from decimal import Decimal\n"
+"    >>> s = 'print(+21.3e-5*-.1234/81.7)'\n"
+"    >>> decistmt(s)\n"
+"    \"print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))\"\n"
+"\n"
+"    지수 형식은 플랫폼 C 라이브러리에서 상속됩니다. 알려진 경우는 \"e-007\" (윈도우) 와 \"e-07\"\n"
+"    (윈도우 외) 입니다. 우리는 단지 12자리 숫자를 보여주고 있고, 13번째 숫자는 5에 가깝지\n"
+"    않으므로, 출력의 나머지 부분은 플랫폼 독립적이어야 합니다.\n"
+"\n"
+"    >>> exec(s) #doctest: +ELLIPSIS\n"
+"    -3.21716034272e-0...7\n"
+"\n"
+"    Decimal을 사용한 계산의 결과는 모든 플랫폼에서 같아야 합니다.\n"
+"\n"
+"    >>> exec(decistmt(s))\n"
+"    -3.217160342717258261933904529E-7\n"
+"    \"\"\"\n"
+"    result = []\n"
+"    g = tokenize(BytesIO(s.encode('utf-8')).readline)  # 문자열을 토큰화합니다\n"
+"    for toknum, tokval, _, _, _ in g:\n"
+"        if toknum == NUMBER and '.' in tokval:  # NUMBER 토큰을 바꿔치기합니다\n"
+"            result.extend([\n"
+"                (NAME, 'Decimal'),\n"
+"                (OP, '('),\n"
+"                (STRING, repr(tokval)),\n"
+"                (OP, ')')\n"
+"            ])\n"
+"        else:\n"
+"            result.append((toknum, tokval))\n"
+"    return untokenize(result).decode('utf-8')"
 
 #: ../../library/tokenize.rst:224
 msgid "Example of tokenizing from the command line. The script::"
@@ -362,6 +401,10 @@ msgid ""
 "\n"
 "say_hello()"
 msgstr ""
+"def say_hello():\n"
+"    print(\"Hello, World!\")\n"
+"\n"
+"say_hello()"
 
 #: ../../library/tokenize.rst:231
 msgid ""
@@ -397,6 +440,27 @@ msgid ""
 "4,11-4,12: NEWLINE '\\n'\n"
 "5,0-5,0: ENDMARKER ''"
 msgstr ""
+"$ python -m tokenize hello.py\n"
+"0,0-0,0: ENCODING 'utf-8'\n"
+"1,0-1,3: NAME 'def'\n"
+"1,4-1,13: NAME 'say_hello'\n"
+"1,13-1,14: OP '('\n"
+"1,14-1,15: OP ')'\n"
+"1,15-1,16: OP ':'\n"
+"1,16-1,17: NEWLINE '\\n'\n"
+"2,0-2,4: INDENT '    '\n"
+"2,4-2,9: NAME 'print'\n"
+"2,9-2,10: OP '('\n"
+"2,10-2,25: STRING '\"Hello, World!\"'\n"
+"2,25-2,26: OP ')'\n"
+"2,26-2,27: NEWLINE '\\n'\n"
+"3,0-3,1: NL '\\n'\n"
+"4,0-4,0: DEDENT ''\n"
+"4,0-4,9: NAME 'say_hello'\n"
+"4,9-4,10: OP '('\n"
+"4,10-4,11: OP ')'\n"
+"4,11-4,12: NEWLINE '\\n'\n"
+"5,0-5,0: ENDMARKER ''"
 
 #: ../../library/tokenize.rst:259
 msgid "The exact token type names can be displayed using the :option:`-e` option:"
@@ -426,6 +490,26 @@ msgid ""
 "4,11-4,12: NEWLINE '\\n'\n"
 "5,0-5,0: ENDMARKER ''"
 msgstr ""
+"0,0-0,0: ENCODING 'utf-8'\n"
+"1,0-1,3: NAME 'def'\n"
+"1,4-1,13: NAME 'say_hello'\n"
+"1,13-1,14: LPAR '('\n"
+"1,14-1,15: RPAR ')'\n"
+"1,15-1,16: COLON ':'\n"
+"1,16-1,17: NEWLINE '\\n'\n"
+"2,0-2,4: INDENT '    '\n"
+"2,4-2,9: NAME 'print'\n"
+"2,9-2,10: LPAR '('\n"
+"2,10-2,25: STRING '\"Hello, World!\"'\n"
+"2,25-2,26: RPAR ')'\n"
+"2,26-2,27: NEWLINE '\\n'\n"
+"3,0-3,1: NL '\\n'\n"
+"4,0-4,0: DEDENT ''\n"
+"4,0-4,9: NAME 'say_hello'\n"
+"4,9-4,10: LPAR '('\n"
+"4,10-4,11: RPAR ')'\n"
+"4,11-4,12: NEWLINE '\\n'\n"
+"5,0-5,0: ENDMARKER ''"
 
 #: ../../library/tokenize.rst:285
 msgid ""
@@ -442,6 +526,12 @@ msgid ""
 "    for token in tokens:\n"
 "        print(token)"
 msgstr ""
+"import tokenize\n"
+"\n"
+"with tokenize.open('hello.py') as f:\n"
+"    tokens = tokenize.generate_tokens(f.readline)\n"
+"    for token in tokens:\n"
+"        print(token)"
 
 #: ../../library/tokenize.rst:295
 msgid "Or reading bytes directly with :func:`.tokenize`::"
@@ -456,15 +546,10 @@ msgid ""
 "    for token in tokens:\n"
 "        print(token)"
 msgstr ""
-
-#~ msgid ""
-#~ "Note that unclosed single-quoted strings"
-#~ " do not cause an error to be"
-#~ " raised. They are tokenized as "
-#~ ":data:`~token.ERRORTOKEN`, followed by the "
-#~ "tokenization of their contents."
-#~ msgstr ""
-#~ "닫히지 않은 작은따옴표로 묶인 문자열은 에러를 발생시키지"
-#~ " 않음에 유의하십시오. 그것들은 :data:`~token.ERRORTOKEN`\\로"
-#~ " 토큰화되고, 그 뒤에 내용이 토큰화됩니다."
+"import tokenize\n"
+"\n"
+"with open('hello.py', 'rb') as f:\n"
+"    tokens = tokenize.tokenize(f.readline)\n"
+"    for token in tokens:\n"
+"        print(token)"
