Skip to content

Commit

Permalink
refactor: rename python files
Browse files Browse the repository at this point in the history
  • Loading branch information
LogCreative committed Jul 7, 2024
1 parent 4f505d9 commit 2f258d6
Show file tree
Hide file tree
Showing 5 changed files with 32 additions and 31 deletions.
6 changes: 3 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ TikzEdt is the inspiration of this project, to create LaTeX TikZ graph in WYSIWY
```
- Start the server.
```bash
python server.py
python ppedt_server.py
```
- Open `http://127.0.0.1:5678` in your browser.
> For Windows users, the server side will automatically change the fontset used by CJK for broader compatibility.
Expand All @@ -43,12 +43,12 @@ TikzEdt is the inspiration of this project, to create LaTeX TikZ graph in WYSIWY
- Install [Anaconda](https://www.anaconda.com/download/success) first.
- Use the following commands to set up the environment; you don't have to set it up again on later runs:
```bash
conda env update -n ppedt -f llm.yml
conda env update -n ppedt -f ppedt_server_llm.yml
```
- Use the commands to start the PGFPlotsEdt server with LLM:
```bash
conda activate ppedt
python server_llm.py
python ppedt_server_llm.py
```
On the first run, the model will be downloaded, which takes some time and disk space. The Llama 3 model will be saved in a temporary folder (in the `~/.cache/mlc_llm` directory) and the loading time will be much faster on subsequent runs.
- Open `http://127.0.0.1:5678` in your browser, then press the "Edit code manually" button (or just open `http://127.0.0.1:5678?code_only` directly). If an LLM toolbar appears at the bottom of the text editor, then the LLM is ready to go.
Expand Down
38 changes: 19 additions & 19 deletions deploy/gunicorn-deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@

import sys
sys.path.append('..')
import server
import ppedt_server
from res.version_updater import write_version_info
from config import *

Expand All @@ -34,7 +34,7 @@ def number_of_workers():
def run_cmd_with_timeout(cmd: str):
try:
p = subprocess.Popen(
"cd {} && {}".format(server.tmpdir, cmd), # cmd
"cd {} && {}".format(ppedt_server.tmpdir, cmd), # cmd
stdout=subprocess.PIPE, # hide output
shell=True, # run in shell to prevent error
start_new_session=True # create a process group
Expand All @@ -44,33 +44,33 @@ def run_cmd_with_timeout(cmd: str):
os.killpg(os.getpgid(p.pid), signal.SIGTERM) # prevent background running
raise subprocess.TimeoutExpired(p.args, TIMEOUT) # raise the exception for the main loop
# Patch server run_cmd
server.run_cmd = run_cmd_with_timeout
ppedt_server.run_cmd = run_cmd_with_timeout


def tex_length_limit_hook(tex: str):
if len(tex) > LENGTH_LIMIT:
raise Exception("The length of the LaTeX source is too long.")
# Patch server tex_length_limit_hook
server.tex_length_limit_hook = tex_length_limit_hook
ppedt_server.tex_length_limit_hook = tex_length_limit_hook

tmp_header_cache_dir = os.path.join(server.tmpdir, 'cache')
tmp_header_cache_dir = os.path.join(ppedt_server.tmpdir, 'cache')

def get_header_hashed_name(header: str):
header_hash = hashlib.sha256((header).encode()).hexdigest()[:16]
return "{}_header.fmt".format(header_hash)

def compile_header_cached(cur_header: str, compiler: str, sessid: str):
header_name = server.get_header_name(sessid)
header_name = ppedt_server.get_header_name(sessid)
header_hased_name = get_header_hashed_name(cur_header)
header_hashed_path = os.path.join(tmp_header_cache_dir, header_hased_name)
header_ref_path = os.path.join(server.tmpdir, "{}.fmt".format(header_name))
header_ref_path = os.path.join(ppedt_server.tmpdir, "{}.fmt".format(header_name))
# remove the original link
if os.path.exists(header_ref_path):
os.unlink(header_ref_path)
if server.same_or_write(header_name, cur_header) and not os.path.isfile(header_hashed_path):
server.clean_log(header_name)
server.clean_log(server.get_body_name(sessid))
server.run_cmd(server.header_cmd(header_name, compiler))
if ppedt_server.same_or_write(header_name, cur_header) and not os.path.isfile(header_hashed_path):
ppedt_server.clean_log(header_name)
ppedt_server.clean_log(ppedt_server.get_body_name(sessid))
ppedt_server.run_cmd(ppedt_server.header_cmd(header_name, compiler))
if not os.path.isfile(header_ref_path):
return # early stop if the compilation failed
    # rename the compiled header to the hashed header name,
Expand All @@ -85,7 +85,7 @@ def compile_header_cached(cur_header: str, compiler: str, sessid: str):
# in this situation, recompiling will work.
os.symlink(header_hashed_path, header_ref_path)
# Patch server compile_header
server.compile_header = compile_header_cached
ppedt_server.compile_header = compile_header_cached


def reqid_hook(reqid: str):
Expand All @@ -97,7 +97,7 @@ def reqid_hook(reqid: str):
reqhash = hash(reqid) # reqid should be string
reqhash += sys.maxsize + 1 # make it positive, https://stackoverflow.com/a/18766856
return hex(reqhash)[2:] # remove the '0x'
server.reqid_hook = reqid_hook
ppedt_server.reqid_hook = reqid_hook


class StandaloneApplication(gunicorn.app.base.BaseApplication):
Expand All @@ -119,7 +119,7 @@ def load(self):

def on_starting(serv):
# Use a shared dict to store compiling sessions
server.compiling_sessions = multiprocessing.Manager().dict()
ppedt_server.compiling_sessions = multiprocessing.Manager().dict()
serv.log.info('''
PGFPlotsEdt Deployment Server
Expand Down Expand Up @@ -152,7 +152,7 @@ def dir_clean_LRU(dirpath: str, key_suffix: str = '.tex'):
header_list.sort(key=lambda x: x.stat().st_atime)
sessid_list = [f.stem.split('_')[0] for f in header_list]
# remove compiling sessions
sessid_list = list(filter(lambda x: x not in server.compiling_sessions.keys(), sessid_list))
sessid_list = list(filter(lambda x: x not in ppedt_server.compiling_sessions.keys(), sessid_list))
if len(sessid_list) >= CACHE_SIZE:
# Remove one at a time.
sessid_removal = sessid_list[0]
Expand All @@ -163,7 +163,7 @@ def dir_clean_LRU(dirpath: str, key_suffix: str = '.tex'):
def pre_request(worker, req):
# Implement LRU cache on the deploy side.
if req.method == 'POST' and req.path == '/compile':
dir_clean_LRU(server.tmpdir, '.tex')
dir_clean_LRU(ppedt_server.tmpdir, '.tex')
dir_clean_LRU(tmp_header_cache_dir, '.fmt')


Expand All @@ -175,9 +175,9 @@ def pre_request(worker, req):
'pre_request': pre_request,
'errorlog': 'error.log',
}
deployApp = server.app
os.makedirs(server.tmpdir, exist_ok=True)
deployApp = ppedt_server.app
os.makedirs(ppedt_server.tmpdir, exist_ok=True)
os.makedirs(tmp_header_cache_dir, exist_ok=True)
ver = write_version_info(os.path.join(server.rootdir, "res"))
ver = write_version_info(os.path.join(ppedt_server.rootdir, "res"))
print("PGFPlotsEdt {} deployment server is running, see error.log for running information.".format(ver))
StandaloneApplication(deployApp, options).run()
2 changes: 1 addition & 1 deletion server.py → ppedt_server.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""
PGFPlotsEdt main LaTeX backend server
PGFPlotsEdt LaTeX backend server
"""

# Copyright (c) Log Creative 2020--2024.
Expand Down
16 changes: 8 additions & 8 deletions server_llm.py → ppedt_server_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
engine = None

import sys
import server
import ppedt_server
from res.version_updater import write_version_info

def llm_hook(code, prompt):
Expand All @@ -36,27 +36,27 @@ def llm_hook(code, prompt):
):
yield response.choices[0].delta.content

server.llm_hook = llm_hook
ppedt_server.llm_hook = llm_hook


def llm_test():
return "PGFPlotsEdt LaTeX Server: POST a request (code, prompt) to LLM.\n", 200

server.llm_test = llm_test
ppedt_server.llm_test = llm_test


if __name__ == '__main__':
print("Loading LLM model...")
engine = MLCEngine(model)

ver = write_version_info(os.path.join(server.rootdir, "res"))
ver = write_version_info(os.path.join(ppedt_server.rootdir, "res"))
print("PGFPlotsEdt {} with Llama 3".format(ver))

# Clean up the tmpdir and create a new one.
if os.path.isdir(server.tmpdir):
shutil.rmtree(server.tmpdir)
os.mkdir(server.tmpdir)
if os.path.isdir(ppedt_server.tmpdir):
shutil.rmtree(ppedt_server.tmpdir)
os.mkdir(ppedt_server.tmpdir)

server.app.run(host="127.0.0.1", port=5678)
ppedt_server.app.run(host="127.0.0.1", port=5678)

print("\nPress CTRL+C again to exit.")
1 change: 1 addition & 0 deletions llm.yml → ppedt_server_llm.yml
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# Set up the conda environment for the LLM server -- ppedt_server_llm.py
dependencies:
- python=3.8
- pip
Expand Down

0 comments on commit 2f258d6

Please sign in to comment.