@@ -17,9 +17,8 @@ msgstr ""
"Generated-By: Babel 2.17.0\n"

#: ../../library/tokenize.rst:2
- #, fuzzy
msgid ":mod:`!tokenize` --- Tokenizer for Python source"
- msgstr ":mod:`tokenize` --- 파이썬 소스를 위한 토크나이저"
+ msgstr ":mod:`!tokenize` --- 파이썬 소스를 위한 토크나이저"

#: ../../library/tokenize.rst:10
msgid "**Source code:** :source:`Lib/tokenize.py`"
@@ -164,15 +163,14 @@ msgstr ""
"반환해야 합니다. 추가 시퀀스 요소는 무시됩니다."

#: ../../library/tokenize.rst:94
- #, fuzzy
msgid ""
"The result is guaranteed to tokenize back to match the input so that the "
"conversion is lossless and round-trips are assured. The guarantee "
"applies only to the token type and token string as the spacing between "
"tokens (column positions) may change."
msgstr ""
- "재구성된 스크립트는 단일 문자열로 반환됩니다. 결과는 다시 토큰화하면 입력과 일치함이 보장되어, 변환은 무손실이고 왕복이 "
- "보장됩니다. 보증은 토큰 유형과 토큰 문자열에만 적용되어, 토큰 간의 간격(열 위치)은 변경될 수 있습니다."
+ "결과는 다시 토큰화하면 입력과 일치함이 보장되어, 변환은 무손실이고 왕복이 보장됩니다. 보증은 토큰 유형과 토큰 문자열에만 "
+ "적용되어, 토큰 간의 간격(열 위치)은 변경될 수 있습니다."

#: ../../library/tokenize.rst:99
msgid ""
@@ -251,6 +249,8 @@ msgid ""
"\"\"\"Beginning of\n"
"docstring"
msgstr ""
+ "\"\"\"Beginning of\n"
+ "docstring"

#: ../../library/tokenize.rst:144
msgid "or::"
@@ -262,6 +262,9 @@ msgid ""
" 2,\n"
" 3"
msgstr ""
+ "[1,\n"
+ " 2,\n"
+ " 3"

#: ../../library/tokenize.rst:153
msgid "Command-Line Usage"
@@ -275,7 +278,7 @@ msgstr ":mod:`tokenize` 모듈은 명령 줄에서 스크립트로 실행될 수

#: ../../library/tokenize.rst:160
msgid "python -m tokenize [-e] [filename.py]"
- msgstr ""
+ msgstr "python -m tokenize [-e] [filename.py]"

#: ../../library/tokenize.rst:164
msgid "The following options are accepted:"
@@ -350,6 +353,42 @@ msgid ""
"            result.append((toknum, tokval))\n"
"    return untokenize(result).decode('utf-8')"
msgstr ""
+ "from tokenize import tokenize, untokenize, NUMBER, STRING, NAME, OP\n"
+ "from io import BytesIO\n"
+ "\n"
+ "def decistmt(s):\n"
+ "    \"\"\"문장 문자열에 있는 float를 Decimal로 치환합니다.\n"
+ "\n"
+ "    >>> from decimal import Decimal\n"
+ "    >>> s = 'print(+21.3e-5*-.1234/81.7)'\n"
+ "    >>> decistmt(s)\n"
+ "    \"print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))\"\n"
+ "\n"
+ "    지수 형식은 플랫폼 C 라이브러리에서 상속됩니다. 알려진 경우는 \"e-007\" (윈도우) 와 \"e-07\"\n"
+ "    (윈도우 외) 입니다. 우리는 단지 12자리 숫자를 보여주고 있고, 13번째 숫자는 5에 가깝지\n"
+ "    않으므로, 출력의 나머지 부분은 플랫폼 독립적이어야 합니다.\n"
+ "\n"
+ "    >>> exec(s) #doctest: +ELLIPSIS\n"
+ "    -3.21716034272e-0...7\n"
+ "\n"
+ "    Decimal을 사용한 계산의 결과는 모든 플랫폼에서 같아야 합니다.\n"
+ "\n"
+ "    >>> exec(decistmt(s))\n"
+ "    -3.217160342717258261933904529E-7\n"
+ "    \"\"\"\n"
+ "    result = []\n"
+ "    g = tokenize(BytesIO(s.encode('utf-8')).readline)  # 문자열을 토큰화합니다\n"
+ "    for toknum, tokval, _, _, _ in g:\n"
+ "        if toknum == NUMBER and '.' in tokval:  # NUMBER 토큰을 바꿔치기합니다\n"
+ "            result.extend([\n"
+ "                (NAME, 'Decimal'),\n"
+ "                (OP, '('),\n"
+ "                (STRING, repr(tokval)),\n"
+ "                (OP, ')')\n"
+ "            ])\n"
+ "        else:\n"
+ "            result.append((toknum, tokval))\n"
+ "    return untokenize(result).decode('utf-8')"

#: ../../library/tokenize.rst:224
msgid "Example of tokenizing from the command line. The script::"
@@ -362,6 +401,10 @@ msgid ""
"\n"
"say_hello()"
msgstr ""
+ "def say_hello():\n"
+ "    print(\"Hello, World!\")\n"
+ "\n"
+ "say_hello()"

#: ../../library/tokenize.rst:231
msgid ""
@@ -397,6 +440,27 @@ msgid ""
"4,11-4,12: NEWLINE '\\n'\n"
"5,0-5,0: ENDMARKER ''"
msgstr ""
+ "$ python -m tokenize hello.py\n"
+ "0,0-0,0: ENCODING 'utf-8'\n"
+ "1,0-1,3: NAME 'def'\n"
+ "1,4-1,13: NAME 'say_hello'\n"
+ "1,13-1,14: OP '('\n"
+ "1,14-1,15: OP ')'\n"
+ "1,15-1,16: OP ':'\n"
+ "1,16-1,17: NEWLINE '\\n'\n"
+ "2,0-2,4: INDENT '    '\n"
+ "2,4-2,9: NAME 'print'\n"
+ "2,9-2,10: OP '('\n"
+ "2,10-2,25: STRING '\"Hello, World!\"'\n"
+ "2,25-2,26: OP ')'\n"
+ "2,26-2,27: NEWLINE '\\n'\n"
+ "3,0-3,1: NL '\\n'\n"
+ "4,0-4,0: DEDENT ''\n"
+ "4,0-4,9: NAME 'say_hello'\n"
+ "4,9-4,10: OP '('\n"
+ "4,10-4,11: OP ')'\n"
+ "4,11-4,12: NEWLINE '\\n'\n"
+ "5,0-5,0: ENDMARKER ''"

#: ../../library/tokenize.rst:259
msgid "The exact token type names can be displayed using the :option:`-e` option:"
@@ -426,6 +490,26 @@ msgid ""
"4,11-4,12: NEWLINE '\\n'\n"
"5,0-5,0: ENDMARKER ''"
msgstr ""
+ "0,0-0,0: ENCODING 'utf-8'\n"
+ "1,0-1,3: NAME 'def'\n"
+ "1,4-1,13: NAME 'say_hello'\n"
+ "1,13-1,14: LPAR '('\n"
+ "1,14-1,15: RPAR ')'\n"
+ "1,15-1,16: COLON ':'\n"
+ "1,16-1,17: NEWLINE '\\n'\n"
+ "2,0-2,4: INDENT '    '\n"
+ "2,4-2,9: NAME 'print'\n"
+ "2,9-2,10: LPAR '('\n"
+ "2,10-2,25: STRING '\"Hello, World!\"'\n"
+ "2,25-2,26: RPAR ')'\n"
+ "2,26-2,27: NEWLINE '\\n'\n"
+ "3,0-3,1: NL '\\n'\n"
+ "4,0-4,0: DEDENT ''\n"
+ "4,0-4,9: NAME 'say_hello'\n"
+ "4,9-4,10: LPAR '('\n"
+ "4,10-4,11: RPAR ')'\n"
+ "4,11-4,12: NEWLINE '\\n'\n"
+ "5,0-5,0: ENDMARKER ''"

#: ../../library/tokenize.rst:285
msgid ""
@@ -442,6 +526,12 @@ msgid ""
"    for token in tokens:\n"
"        print(token)"
msgstr ""
+ "import tokenize\n"
+ "\n"
+ "with tokenize.open('hello.py') as f:\n"
+ "    tokens = tokenize.generate_tokens(f.readline)\n"
+ "    for token in tokens:\n"
+ "        print(token)"

#: ../../library/tokenize.rst:295
msgid "Or reading bytes directly with :func:`.tokenize`::"
@@ -456,15 +546,10 @@ msgid ""
"    for token in tokens:\n"
"        print(token)"
msgstr ""
-
- #~ msgid ""
- #~ "Note that unclosed single-quoted strings"
- #~ " do not cause an error to be"
- #~ " raised. They are tokenized as "
- #~ ":data:`~token.ERRORTOKEN`, followed by the "
- #~ "tokenization of their contents."
- #~ msgstr ""
- #~ "닫히지 않은 작은따옴표로 묶인 문자열은 에러를 발생시키지"
- #~ " 않음에 유의하십시오. 그것들은 :data:`~token.ERRORTOKEN`\\로"
- #~ " 토큰화되고, 그 뒤에 내용이 토큰화됩니다."
+ "import tokenize\n"
+ "\n"
+ "with open('hello.py', 'rb') as f:\n"
+ "    tokens = tokenize.tokenize(f.readline)\n"
+ "    for token in tokens:\n"
+ "        print(token)"
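As context for the round-trip wording in the tokenize.rst:94 entry above, a minimal sketch of the behaviour it describes, using only the standard-library tokenize module (illustrative only, not part of the translated file)::

    import io
    import tokenize

    # untokenize() output re-tokenizes to the same token types and strings,
    # although the spacing between tokens (column positions) may change.
    source = "print(+21.3e-5*-.1234/81.7)\n"
    tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))

    # Passing 2-tuples (type, string) selects untokenize()'s compatibility
    # mode, which rebuilds the source with its own whitespace choices.
    rebuilt = tokenize.untokenize((tok.type, tok.string) for tok in tokens)
    retokenized = list(tokenize.generate_tokens(io.StringIO(rebuilt).readline))

    assert [(t.type, t.string) for t in tokens] == [
        (t.type, t.string) for t in retokenized
    ]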