From fec6d123055be357e046c93ad05f4d106bd0fb50 Mon Sep 17 00:00:00 2001
From: itincknell
Date: Sat, 9 Mar 2024 23:09:01 -0500
Subject: [PATCH] Quechua added to language options

---
 .gitignore                    | 43 ++++++++++++++++++++++++
 src/convert_file_utilities.py | 29 ++++++++--------
 src/create_word.py            | 38 ++++++++++++++++++---
 src/edit_all.py               | 20 ++++++++---
 src/edit_dictionary.py        | 63 +++++++++++++++++++++++++----------
 src/get_simple.py             | 22 ++++++++++--
 src/load_dict.py              | 18 +++++-----
 src/main.py                   |  2 +-
 src/parser_shell.py           | 42 ++++++++++++++++++-----
 src/tables_greek_ext.py       |  4 +++
 src/word_methods.py           | 25 ++------------
 src/word_print_edit.py        |  6 ++--
 12 files changed, 227 insertions(+), 85 deletions(-)

diff --git a/.gitignore b/.gitignore
index e43b0f9..9d18004 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,44 @@
 .DS_Store
+Perseus_text_1999.04.0058.txt
+grc.lsj.perseus-eng1.txt
+grc.lsj.perseus-eng2.txt
+grc.lsj.perseus-eng3.txt
+grc.lsj.perseus-eng4.txt
+grc.lsj.perseus-eng5.txt
+grc.lsj.perseus-eng6.txt
+grc.lsj.perseus-eng7.txt
+grc.lsj.perseus-eng8.txt
+grc.lsj.perseus-eng9.txt
+grc.lsj.perseus-eng10.txt
+grc.lsj.perseus-eng11.txt
+grc.lsj.perseus-eng12.txt
+grc.lsj.perseus-eng13.txt
+grc.lsj.perseus-eng14.txt
+grc.lsj.perseus-eng15.txt
+grc.lsj.perseus-eng16.txt
+grc.lsj.perseus-eng17.txt
+grc.lsj.perseus-eng18.txt
+grc.lsj.perseus-eng19.txt
+grc.lsj.perseus-eng20.txt
+grc.lsj.perseus-eng21.txt
+grc.lsj.perseus-eng22.txt
+grc.lsj.perseus-eng23.txt
+grc.lsj.perseus-eng24.txt
+grc.lsj.perseus-eng25.txt
+grc.lsj.perseus-eng26.txt
+grc.lsj.perseus-eng27.txt
+OldEnglish-trie.txt
+Latin-trie.txt
+AncientGreek-trie.txt
+Latin_new_tag_list.txt
+GreekDictionary.txt
+LatinDictionary.txt
+OldEnglishDictionary.txt
+Old English_new_tag_list.txt
+Ancient Greek_new_tag_list.txt
+formatted_flashcard_files/
+kaikki_json_files/
+sorted_language_files/
+src/__pycache__/
+supplementary_language_files/Quechua_new_tag_list.txt
+user_created_dictionaries/

diff --git a/src/convert_file_utilities.py b/src/convert_file_utilities.py
index cb1f319..66e5c02 100644
--- a/src/convert_file_utilities.py
+++ b/src/convert_file_utilities.py
@@ -22,7 +22,7 @@
 def debug_print(Test, *args):
     """Print messages if Test is True."""
-    if Test:
+    if False: # debug output hard-disabled; restore `if Test:` to re-enable
         print(*args)
 
 def similar_enough(item, tag):
@@ -133,10 +133,10 @@ def process_glosses(glosses, gloss_tags):
             gloss_tags = process_glosses(sense['raw_glosses'], gloss_tags)
 
     if Test:
-        print(f"dupe_list = {dupe_list}")
+        debug_print(Test, f"dupe_list = {dupe_list}")
 
     if Test:
-        print(f"senses: {senses}")
+        debug_print(Test, f"senses: {senses}")
     return senses
 
 def get_file_selection(Test, test_file, test_language):
@@ -174,14 +174,14 @@ def get_file_selection(Test, test_file, test_language):
 def print_debug_info(line, counter):
     ''' Function for viewing unprocessed json data '''
-    print('\n')
-    print(f"\tline: {counter}, word: {line['word']}")
-    print("WORD ITEMS >>>>>>>>>>>>>>")
+    debug_print(Test, '\n')
+    debug_print(Test, f"\tline: {counter}, word: {line['word']}")
+    debug_print(Test, "WORD ITEMS >>>>>>>>>>>>>>")
     for item in line.items():
-        print(item)
-    print("SENSES ITEMS >>>>>>>>>>>>")
+        debug_print(Test, item)
+    debug_print(Test, "SENSES ITEMS >>>>>>>>>>>>")
     for item in line['senses'][0].items():
-        print(item)
+        debug_print(Test, item)
 
 def handle_pos(line):
     ''' Matches json pos abbreviations with those used in
@@ -362,13 +362,16 @@ def convert_files():
     tag_list = []
 
     # choose language option, create kaikki.org file string
-    language = pick_language()
+    if not Test:
+        language = pick_language()
+    else:
+        language = test_language
     file = "kaikki.org-dictionary-" + language.replace(' ','') + ".json"
 
     # this module loads a '-trie.txt' file which can take a few seconds; should be avoided if language != Latin
     if language == "Latin":
         from get_simple import get_simple
-        simple = get_simple
+        simple = lambda x, y, z: get_simple(x, y, z, language)
     else:
         simple = None
@@ -382,7 +385,7 @@
     change_path(KAIKKI_JSON_FILES)
     try:
         with open(file, 'r') as input_file:
-            new_dictionary['definitions'] = parse_lines(input_file, tag_list,language,simple)
+            new_dictionary['definitions'] = parse_lines(input_file, tag_list, language, simple)
 
             # save a list of all tags that were encountered
             change_path(SUPPLEMENTARY_LANGUAGE_FILES)
@@ -441,7 +444,7 @@
 
 if Test:
-    sort_dump()
+    convert_files()
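The rewritten call sites in the hunks above pass Test as the first argument because debug_print treats its first positional parameter as the gate; a message string passed alone would be swallowed as the flag and never printed. A minimal standalone sketch of the gating pattern (toy code, not taken from the repo):

Test = False

def debug_print(test, *args):
    """Print the remaining arguments only when the flag is truthy."""
    if test:
        print(*args)

debug_print(Test, "dupe_list =", [])  # silent while Test is False
debug_print(True, "forced message")   # prints regardless of the module flag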
diff --git a/src/create_word.py b/src/create_word.py
index d297c05..60a23ad 100644
--- a/src/create_word.py
+++ b/src/create_word.py
@@ -49,12 +49,40 @@ def create_word(current_dict,tags):
         new_word['entries'][0], dummy = edit_entry.edit_entry(new_word['entries'][0],new_word)
 
     # call word options, from here return
-    load_dict.change_path(SORTED_LANGUAGE_FILES)
-    if current_dict['language'] == 'Latin' or current_dict['language'] == "Ancient Greek":
-        wiki_dump = parser_shell.load_big_language(new_word['heading'][0],current_dict['language'])
+
+    # load trie for current language
+    load_dict.change_path(load_dict.SORTED_LANGUAGE_FILES)
+    file_name = current_dict['language'].replace(" ","") + '-trie.txt'
+    try:
+        with open(file_name,'rb') as openFile:
+            sorted_language_object = pickle.load(openFile)
+    except FileNotFoundError:
+        input(f'Error: "{file_name}" not found in sorted_language_files;\ngo to data files in main menu to extract files.\n')
+        return None
+
+    t = sorted_language_object['definitions']
+
+    key = unidecode(new_word['heading']).lower()
+
+    if key not in t:
+        t[key] = new_word
+
+    # if key is already used
     else:
-        wiki_dump = parser_shell.load_sorted_language(current_dict['language'])
-    parser_shell.save_word(new_word,wiki_dump,2)
+        # if a list is already started
+        if isinstance(t[key],list):
+            t[key].append(new_word)
+
+        # otherwise start a list with [previous item, new item]
+        else:
+            t[key] = [t[key],new_word]
+
+    sorted_language_object['definitions'] = t
+
+    with open(sorted_language_object['file'],mode = 'wb') as openFile:
+        pickle.dump(sorted_language_object, openFile)
+
+    current_dict = parser_shell.save_word(new_word,current_dict)
     return current_dict
 
 # END CREATE WORD
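The new create_word() body above upserts into the pickled trie: a normalized key maps to a single entry until a homograph arrives, then to a list. A standalone sketch of that collision policy, with a plain dict standing in for the pickled trie object (unidecode is an existing repo dependency):

from unidecode import unidecode  # folds accents so 'Amor' and 'amor' share a key

def upsert(entries, new_word):
    """Insert under a normalized key; collide into a list of homographs."""
    key = unidecode(new_word['heading']).lower()
    if key not in entries:
        entries[key] = new_word
    elif isinstance(entries[key], list):
        entries[key].append(new_word)
    else:
        entries[key] = [entries[key], new_word]

entries = {}
upsert(entries, {'heading': 'Amor'})
upsert(entries, {'heading': 'amor'})
print(entries['amor'])  # [{'heading': 'Amor'}, {'heading': 'amor'}]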
diff --git a/src/edit_all.py b/src/edit_all.py
index 504115f..fb9f3e4 100644
--- a/src/edit_all.py
+++ b/src/edit_all.py
@@ -12,7 +12,7 @@
 from iteration_utilities import unique_everseen
 from copy import deepcopy
 from unidecode import unidecode
-from load_dict import change_path, USER_CREATED_DICTIONARIES, FLASHCARD_TEMPLATE_FILES
+from load_dict import change_path, USER_CREATED_DICTIONARIES, FLASHCARD_TEMPLATE_FILES, SORTED_LANGUAGE_FILES
 import word_print_edit
 import edit_entry
 import edit_dictionary
@@ -30,7 +30,7 @@ def edit_all(current_dict):
         '2':">'2' special option\n",
         'a':">'a' change file name\n",
         'b':">'b' special option II\n",
-        'c':">'c' convert to gloss/tags senses\n",
+        'c':">'c' replace defs with senses\n",
         'd':">'d' match dictionaries\n",
         '3':">'3' to edit a subset by tag\n",
         '4':">'4' to remove punctuation\n",
@@ -72,7 +72,7 @@
     elif user_input == '2':
         current = special(current_dict)
     elif user_input == 'c':
-        current_dict = replace_senses(current_dict)
+        current_dict = replace_defs_with_senses(current_dict)
     elif user_input == 'd':
         match_dictionaries(current_dict)
     elif user_input == 'p':
@@ -233,6 +233,18 @@ def replace_senses(current_dict):
     openFile.close()
     return current_dict
 
+def replace_defs_with_senses(current_dict):
+    for i in range(len(current_dict['definitions'])):
+        for j in range(len(current_dict['definitions'][i]['entries'])):
+            if 'defs' in current_dict['definitions'][i]['entries'][j]:
+                current_dict['definitions'][i]['entries'][j]['senses'] = current_dict['definitions'][i]['entries'][j]['defs']
+                del current_dict['definitions'][i]['entries'][j]['defs']
+
+    openFile = open(current_dict['file'],mode = 'wb')
+    pickle.dump(current_dict, openFile)
+    openFile.close()
+    return current_dict
+
 def load_latin(index_letter):
     if index_letter.lower() not in 'abcdefghijklmnopqrstuvwxyz':
         index_letter = 'misc'
@@ -243,7 +255,7 @@
 
 def match_dictionaries(current_dict):
 
-    change_path("dumps_sorted")
+    change_path(SORTED_LANGUAGE_FILES)
     trie_file = current_dict['language'].replace(" ","") + '-trie.txt'
     print(f"Loading {trie_file}")
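replace_defs_with_senses() above is a one-way key migration over every entry of every definition. A toy equivalent using dict.pop, which has the same effect as the assign-then-del pair in the patch:

definitions = [
    {'entries': [{'defs': ['love'], 'partOfSpeech': 'noun'}]},
    {'entries': [{'senses': ['to love'], 'partOfSpeech': 'verb'}]},  # already migrated
]

for word in definitions:
    for entry in word['entries']:
        if 'defs' in entry:
            entry['senses'] = entry.pop('defs')  # rename the key, data unchanged

print(definitions[0]['entries'][0])  # {'partOfSpeech': 'noun', 'senses': ['love']}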
diff --git a/src/edit_dictionary.py b/src/edit_dictionary.py
index 64349b7..52fd206 100644
--- a/src/edit_dictionary.py
+++ b/src/edit_dictionary.py
@@ -633,6 +633,7 @@ def filter_gloss(current_dict,tags,output_file=None,tag_mode='1'):
         # assign user selected file to output
         sys.stdout = open(output_file, 'w')
 
+    get_selection.clear_screen()
     if tags:
         print(f"\n\t{str(tags)}\n")
 
@@ -641,9 +642,12 @@
         "article", "preposition", "conjunction","pronoun","letter",
         "character", "phrase", "proverb", "idiom","symbol",
         "syllable", "numeral", "initialism", "interjection","definitions"]
+
     for part in parts_list:
         count += print_gloss(current_dict,tags,part,tag_mode)
 
+    input("\n\nEnter to continue")
+
     if output_file:
         # re-assign original output
         sys.stdout = original_stdout
@@ -658,6 +662,8 @@ def print_gloss(current_dict,tags,partOfSpeech=None,tag_mode='1'):
     strings = {}
     first_run = True
 
+    window_size = os.get_terminal_size().columns
+
     counter = 0
 
     # Loop to create sub-list to select from
@@ -690,16 +696,26 @@
             counter += 1
 
             # print with desired alignment
-            if current_dict['language'] == 'Latin':
+            if True: #current_dict['language'] == 'Latin':
                 simpleParts = word['entries'][x]['simpleParts']
+
+                difference = len(simpleParts) - get_selection.visible_len(simpleParts)
+
                 if partOfSpeech == 'verb':
-                    if len(simpleParts) > 50:
-                        simpleParts = simpleParts[:49] + "-"
-                    entry_string = f"{simpleParts:.<50} | "
+
+                    if get_selection.visible_len(simpleParts) > 50:
+                        simpleParts = simpleParts[:49 + difference] + "-"
+
+                    entry_string = f"{simpleParts:.<{50 + difference}} | "
+
                 else:
-                    if len(simpleParts) > 30:
-                        simpleParts = simpleParts[:29] + "-"
-                    entry_string = f"{simpleParts:.<30} | "
+                    if get_selection.visible_len(simpleParts) > 30:
+                        while get_selection.visible_len(simpleParts) > 30:
+                            simpleParts = simpleParts[:-2] + "-"
+                        difference = 0
+
+                    entry_string = f"{simpleParts:.<{30 + difference}} | "
 
                 # check if definition exceeds desired length
                 text = [d['gloss'] for d in word['entries'][x]['senses']]
                 dtags = [d['tags'] for d in word['entries'][x]['senses']]
 
                 if len(text) == 1:
                     entry_string += text[0]
                 else:
                     for i in range(len(text)):
                         entry_string += f"{i+1}) " + text[i].strip('*^†∆') + "; "
+
                 entry_string = entry_string.strip("; ")
 
-                if len(entry_string) > 130:
-                    print(entry_string[:entry_string[:130].rfind(' ')])
+
+                if get_selection.visible_len(entry_string) > window_size - 40: # 130:
+                    print(entry_string[:entry_string[:window_size - 40].rfind(' ')])
+
                     if partOfSpeech == 'verb':
-                        second_line = entry_string[entry_string[:130].rfind(' '):]
-                        if len(second_line) > 80:
-                            second_line = second_line[:77] + "..."
+                        second_line = entry_string[entry_string[:window_size - 40].rfind(' '):]
+                        if get_selection.visible_len(second_line) > window_size - 90:
+                            second_line = second_line[:window_size - 93] + "..."
                         print(f"{'.':.<50} | {second_line}")
+
                     else:
-                        second_line = entry_string[entry_string[:150].rfind(' '):]
-                        if len(second_line) > 100:
-                            second_line = second_line[:97] + "..."
+                        second_line = entry_string[entry_string[:window_size - 20].rfind(' '):]
+                        if get_selection.visible_len(second_line) > window_size - 70:
+                            second_line = second_line[:window_size - 73] + "..."
                         print(f"{'.':.<30} | {second_line}")
                 else:
                     print(f"{entry_string}")
 
             elif current_dict['language'] == "Ancient Greek":
                 entry_string = word['entries'][x]['simpleParts'][:word['entries'][x]['simpleParts'].find(')')+1].strip()
 
                 length_string = length_string.replace('φ','f')
                 length_string = length_string.replace('ψ','c')
                 entry_string += ' ' * (30 - len(unidecode(length_string))) + " | "
+
                 # check if definition exceeds desired length
                 text = [line['gloss'] for line in word['entries'][x]['senses']]
                 text = short_senses(text)
+
                 if len(text) == 1:
                     entry_string += text[0]
                 else:
                     for i in range(len(text)):
                         entry_string += f"{i+1}) " + text[i].strip('*^†∆') + "; "
+
                 entry_string = entry_string.strip("; ")
 
-                if len(entry_string) > 130:
-                    print(entry_string[:entry_string[:130].rfind(' ')])
-                    print(f"{' ':<30} | {entry_string[entry_string[:130].rfind(' '):entry_string[:225].rfind(',')]}")
+                if len(entry_string) > window_size - 30:
+                    print(entry_string[:entry_string[:window_size - 30].rfind(' ')])
+                    print(f"{' ':<30} | {entry_string[entry_string[:window_size - 30].rfind(' '):entry_string[:window_size + 68].rfind(',')]} ...")
                 else:
                     print(f"{entry_string}")
@@ -813,7 +838,7 @@
     # reconstruct string
     new_text = ", ".join(word.strip() for word in line)
 
-    return line
+    return new_text
 
 # SHORT senses # # # # # # # # # #
@@ -829,6 +854,8 @@
         text.remove('')
 
     for i in range(len(text)):
+        if isinstance(text[i],list):
+            print(text)
         text[i] = text[i].strip(',;')
     return text
 # END SHORT senses
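The print_gloss() changes above switch the length tests from len() to get_selection.visible_len(), so headwords with combining marks (common in polytonic Greek) keep their column alignment. That helper's definition isn't shown in this patch; the standalone sketch below assumes it counts non-combining characters, which matches how `difference` is used to widen the format field:

import unicodedata

def visible_len(s):
    """Count characters that occupy a column (combining marks do not)."""
    return sum(1 for ch in s if not unicodedata.combining(ch))

def fit(s, width):
    while visible_len(s) > width:
        s = s[:-2] + "-"           # trim two characters, keep a hyphen marker
    pad = len(s) - visible_len(s)  # widen the field for invisible characters
    return f"{s:.<{width + pad}} | "

print(fit("panta\u0301re longissimum", 10))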
diff --git a/src/get_simple.py b/src/get_simple.py
index 3fc23a0..c34b9d9 100644
--- a/src/get_simple.py
+++ b/src/get_simple.py
@@ -15,7 +15,6 @@
 from load_dict import change_path, KAIKKI_JSON_FILES, SUPPLEMENTARY_LANGUAGE_FILES
 import pickle
 
-
 def load_sorted_language():
     try:
         print("Loading previous Latin trie...")
@@ -220,10 +219,9 @@ def build_string(num_parts,comma_stop,parts,partOfSpeech,i_stem=False):
 
 # END BUILD STRING
 
-
 # GET SIMPLE LA 'LATIN' # # # # # # # # # # # # # # #
-def get_simple(partOfSpeech,parts,heading):
+def get_simple(partOfSpeech,parts,heading,language='Latin'):
 
     # Only works on noun, verb, adjective, pronoun, determiner;
     # adverbs, conjunctions, interjections, etc. remain the same
@@ -236,6 +234,9 @@ def get_simple(partOfSpeech,parts,heading):
     if partOfSpeech not in ['noun','verb','adjective','participle','adverb','pronoun','determiner','numeral']:
         return parts
 
+    if language == "Ancient Greek" and partOfSpeech == 'verb':
+        return parts
+
     # set some flags ['third_decl', 'adjective_parts', 'deponent', 'defective', 'verb_label']
     flags = set_flags_la(parts,partOfSpeech)
@@ -246,6 +247,20 @@
     # Chop parts down to simple list
     parts = chop_parts(parts)
 
+    # experimenting with greek simple parts
+    if language == "Ancient Greek":
+        if "•" in parts:
+            print(parts)
+        if 'κρῐτῐκός' in parts:
+            exit()
+
+    if len(parts) >= 3 and language == "Ancient Greek":
+        parts = parts[:1] + parts[3:]
+
     # abort here if len(parts) == 1 (not a main entry)
     if len(parts) == 1 or partOfSpeech == 'adverb':
         return parts[0]
@@ -316,6 +331,7 @@
         the 'Latin-i_stem_nouns-trie.txt' file.
         Could probably be removed now.
     '''
+    #print(simpleParts)
     if i_stem_mode:
         if i_stem and partOfSpeech == 'noun':
             return simpleParts

diff --git a/src/load_dict.py b/src/load_dict.py
index 8f29655..fef8222 100644
--- a/src/load_dict.py
+++ b/src/load_dict.py
@@ -26,20 +26,24 @@
 PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
 # define names for important directories
-KAIKKI_JSON_FILES = 'kaikki_json_files'
-SORTED_LANGUAGE_FILES = 'sorted_language_files'
-SUPPLEMENTARY_LANGUAGE_FILES = 'supplementary_language_files'
-USER_CREATED_DICTIONARIES = 'user_created_dictionaries'
-FLASHCARD_TEMPLATE_FILES = 'flash_card_templates'
-FORMATTED_FLASHCARD_FILES = 'formatted_flashcard_files'
+KAIKKI_JSON_FILES            = 'kaikki_json_files'
+SORTED_LANGUAGE_FILES        = 'sorted_language_files'
+SUPPLEMENTARY_LANGUAGE_FILES = 'supplementary_language_files'
+USER_CREATED_DICTIONARIES    = 'user_created_dictionaries'
+FLASHCARD_TEMPLATE_FILES     = 'flash_card_templates'
+FORMATTED_FLASHCARD_FILES    = 'formatted_flashcard_files'
+
+language_options = ["Latin","Ancient Greek","Old English","Quechua"]
 
 # CHANGE PATH # # # # # # # # #
 def change_path(folder=''):
     path = os.path.join(PARENT_DIR,folder)
+
     if not os.path.isdir(path):
         os.mkdir(path)
     os.chdir(path)
+
 # END CHANGE PATH
 
 # FIND DICT
@@ -147,8 +151,6 @@
 
 def pick_language():
 
-    language_options = ["Latin","Ancient Greek","Old English"]
-
     options = {'0':"\nChoose the language for your new dictionary ('0' to go back)\n==================================\n"}
     for i in range(len(language_options)):
         options.update({f"{i+1}":f"{i+1}. {language_options[i]}\n"})

diff --git a/src/main.py b/src/main.py
index 36a3db4..3c39a70 100644
--- a/src/main.py
+++ b/src/main.py
@@ -20,7 +20,7 @@
 message += "\n" + "Σ " * (window_size//2)
 
 middle = '\n\n'
-middle += figlet_format("      Word-Hoarder +",font='epic',width=window_size)
+middle += figlet_format(" Word-Hoarder +",font='epic',width=window_size)
 message += middle
 for i in range(5):
     if i%2 == 0:
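pick_language() now reads the module-level language_options list, so adding Quechua is a one-line change that flows through both the menu and the kaikki.org file name. A condensed sketch of that path (menu text simplified):

language_options = ["Latin", "Ancient Greek", "Old English", "Quechua"]

options = {'0': "\nChoose the language for your new dictionary ('0' to go back)\n"}
for i, lang in enumerate(language_options, start=1):
    options[str(i)] = f"{i}. {lang}\n"
print("".join(options.values()))

language = language_options[3]  # a reply of '4' selects Quechua
file = "kaikki.org-dictionary-" + language.replace(' ', '') + ".json"
print(file)  # kaikki.org-dictionary-Quechua.json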
diff --git a/src/parser_shell.py b/src/parser_shell.py
index c5099b7..2b1feb1 100644
--- a/src/parser_shell.py
+++ b/src/parser_shell.py
@@ -52,9 +52,9 @@ def add_word_options(current_dict):
     # is that the user will want to apply some common tags to all words
     # in a given session.
     if not tags:
-        options['0'] += "** No tags selected **\n"
+        options['0'] += "Session Tags: []\n"
     else:
-        options['0'] += f"* {', '.join(tags)} *\n"
+        options['0'] += f"Session Tags: {', '.join(tags)}\n"
 
     # ensure user makes a valid selection
     user_input = get_selection(options)
@@ -123,7 +123,7 @@
     options = {
         '1':f"\n===================================================\n"\
         + f"Do you want to save, edit or discard '{heading}'?\n"}
-    options.update({'2':">>>(1=save, 2=edit, 0=discard)"})
+    options.update({'2':">>>(1=save, 2=edit, 0=discard)"})
 
     # option to create a morphology flashcard template file
     # and indicate that the table exists for future reference.
@@ -229,9 +229,15 @@ def load_sorted_language(language):
 
     print(f"Loading {language}...")
     change_path(SORTED_LANGUAGE_FILES)
-    with open(language.replace(" ","") + '-trie.txt','rb') as openFile:
-        t = pickle.load(openFile)
-    return t['definitions']
+    file_name = language.replace(" ","") + '-trie.txt'
+    try:
+        with open(file_name,'rb') as openFile:
+            t = pickle.load(openFile)
+        return t['definitions']
+    except FileNotFoundError:
+        input(f'Error: "{file_name}" not found in sorted_language_files;\ngo to data files in main menu to extract files.\n')
+        return None
 
 def word_search(current_dict,tags):
 
     # load datrie containing sorted language
     t = load_sorted_language(current_dict['language'])
+    if t is None:
+        return current_dict
 
     while True:
         # Retrieve user selection from dictionary
-        result = choose_from_trie(t,current_dict['language'])
+        result = choose_from_trie(t,current_dict,tags)
         clear_screen()
 
         # 'end' will be returned if the user chooses to end querying
 
 def center_text(text, total_width, alt_char=None):
 
     return '\n'.join([line[:total_width] for line in centered_lines])
 
-def choose_from_trie(t,lang,debug_print=True):
+def choose_from_trie(t,current_dict,tags,debug_print=True):
     ''' Displays search window. Retrieves words matching user search
         string from datrie and prompts user selection from matching words. '''
+    # set language
+    lang = current_dict['language']
+
     # user search string
     prefix = ''
@@ -356,6 +367,11 @@
             message += "clear, '000' to "
         message += "end"
 
+        if tags:
+            message += ", '^' display current gloss\nSession Tags: " + ", ".join(tags)
+        else:
+            message += "\nSession Tags: []"
+
         # display message and wait for user input
         print(f"{message:<{window_size}}")
         user_input = input(": ")
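The hunk above appends the session-tag footer to the search prompt, and the next hunk handles the '^' shortcut it advertises. A toy version of the footer logic:

def search_footer(tags):
    if tags:
        return ", '^' display current gloss\nSession Tags: " + ", ".join(tags)
    return "\nSession Tags: []"

print(search_footer(set()))                  # no tags: bare "Session Tags: []"
print(search_footer({"chapter1", "verbs"}))  # both the hint and the tag list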
@@ -378,6 +394,16 @@
         elif user_input == '00' or user_input == '000':
             return None
 
+        # option to pretty print all words in current dictionary that match
+        # current session tags
+        elif user_input == '^':
+            if tags == set():
+                input("No tags set")
+            else:
+                count = edit_dictionary.filter_gloss(current_dict,tags,output_file=None,tag_mode='1')
+                print(f"\n\t{count} items with current tags\n")
+            continue
+
         # invisible option: entry with a leading 0 will overwrite previous search string
         elif user_input[0] == '0':
             prefix = user_input[1:]

diff --git a/src/tables_greek_ext.py b/src/tables_greek_ext.py
index 844e2f2..7e4128f 100644
--- a/src/tables_greek_ext.py
+++ b/src/tables_greek_ext.py
@@ -1157,6 +1157,10 @@ def clean_page_list(page_list,exception=False):
     offset = 0
     for i in range(1,len(page_list)):
         i = i - offset
+        if not page_list[i]:
+            del page_list[i]
+            offset += 1
+            continue
         if '  ' in page_list[i]:
             page_list[i] = page_list[i].replace('  ',' ')
         if page_list[i].strip()[0] == "<":

diff --git a/src/word_methods.py b/src/word_methods.py
index 3867f31..0845d1b 100644
--- a/src/word_methods.py
+++ b/src/word_methods.py
@@ -89,7 +89,6 @@ def participle_edit(text,first):
             first = False
         text[num] = remove_or(text[num])
 
-
     new_text = ''
     for num in range(len(text)):
         if num < len(text) - 1:
@@ -181,7 +180,6 @@ def copy_word(fetch_word,new_word,language):
         if partOfSpeech == 'participle':
             text, first = participle_edit(text,first)
 
-
         senses.append(text.strip(".").replace(":",";"))
 
         # check if entry is a form of another main entry
@@ -223,14 +221,12 @@ def find_root(roots_list,text,etymology):
 
     test = False
 
-
    trial_list = [copy.deepcopy(etymology)] + copy.deepcopy(text) if etymology else copy.deepcopy(text)
     trial_list = chop_parens(trial_list)
 
     if test:
         print(trial_list)
 
-
     word_bank = ['first-person','second-person','third-person','singular',
         'plural','indicative','imperative','infinitive','subjunctive','active','passive',
         'present','future','perfect','imperfect','pluperfect','participle',
@@ -262,10 +258,6 @@
             if test:
                 print("Continue 1")
             continue
-
-
-
-
         string = trial_list[index]
         string = string.split(' ')
         if len(string) == 1:
 
             del string[index]
             offset += 1
 
-
         for word in word_bank:
             if test:
                 print(f"{word} {string}")
@@ -311,18 +302,6 @@ def getTags(tags=set(),mode='',master_list=[]):
 
     # Whole function contained in loop
     while True:
 
-        # flag if tags already in place
-        if tags:
-
-            # create list of tags
-            tag_string = ", ".join(f"'{tag}'" for tag in tags)
-
-            # Print list with appropriate commas
-            if mode:
-                print(f"\n{mode.title()} tags: {tag_string}")
-            else:
-                print(f"\nCurrent tags: {tag_string}")
-
         if mode:
             string = mode.title() + " "
         else:
 
             '4':">'4' to clear all\n"})
         if master_list:
             options.update({})
+        if tags:
+            options.update({'': "Tags: " + ", ".join(tags)})
 
         user_input = get_selection.get_selection(options)
 
         # Option to finish, return to calling function
 
                 del tags_list[int(user_input) - 1]
                 tags = set(tags_list) # convert list back to set
 
-
         # Option to clear all tags
         elif user_input.lower() == '4':
             tags = set()
 
             for tag in word['tags']:
                 if tag not in master_list:
                     master_list.append(tag)
+    master_list.sort()
     return master_list
 # END GET MASTER LIST
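getTags() above stops printing a separate current-tags banner; the tag list now rides along in the options dict under an empty key, which the repo's get_selection presumably renders as a plain line that no user reply will match. A toy rendering of that idea (get_selection itself simplified away):

def render(options):
    return "".join(options.values())

tags = {"verbs", "chapter1"}
options = {'1': ">'1' to add a tag\n", '4': ">'4' to clear all\n"}
if tags:
    options[''] = "Tags: " + ", ".join(sorted(tags)) + "\n"  # display-only entry
print(render(options))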
entry") user_input = input(": ") if user_input == '1': - new_word = word_combo(new_word,t,current_dict['language']) + new_word = word_combo(new_word,t,current_dict) else: new_entry, result = create_word.create_entry(new_word) @@ -278,10 +278,10 @@ def split_word(word,current_dict): # WORD COMBO # # # # # # # # # # # # # # # -def word_combo(new_word,t,lang,search_word=None): +def word_combo(new_word,t,current_dict,search_word=None): letters = [] """ Retrieve use selection from dictionary """ - combo_word = parser_shell.choose_from_trie(t,lang) + combo_word = parser_shell.choose_from_trie(t,current_dict,tags=set()) if combo_word == None: return new_word