-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathLocal_LLMs
58 lines (39 loc) · 1.97 KB
/
Local_LLMs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
#!/usr/bin/env bash
# An easy way to run/test open-source LLMs locally from an Ubuntu terminal,
# using llamafile by Justine Tunney (Mozilla-Ocho).
set -euo pipefail

# First we create a custom folder and add it to $PATH.
# Writing under /usr/sbin requires root, hence sudo; -p makes this idempotent.
sudo mkdir -p /usr/sbin/llm

# Persist the PATH entry for future shells and apply it to this one.
# NOTE: the path must be absolute ("/usr/sbin/llm") — a relative
# "usr/sbin/llm" would never resolve.
echo 'export PATH="/usr/sbin/llm:$PATH"' >> ~/.bashrc
export PATH="/usr/sbin/llm:$PATH"

# Download llamafile: a single executable that can attach and run any GGUF
# weights from Hugging Face.
curl -L -o llamafile.exe https://github.com/Mozilla-Ocho/llamafile/releases/download/0.6/llamafile-0.6
chmod +x llamafile.exe

# Then copy (or move) that file into our custom folder on $PATH.
sudo cp llamafile.exe /usr/sbin/llm/llamafile.exe

# Next, install huggingface-hub (provides the huggingface-cli tool).
pip3 install huggingface-hub

# Download weights from Hugging Face; we try phi-2 from TheBloke.
# Stick to <=7B models to start — beware of your computer's resources.
huggingface-cli download TheBloke/phi-2-GGUF phi-2.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False

# Now that the weights are downloaded, create a short launcher script.
# A quoted here-doc replaces the interactive "nano phi-2" step so this
# file can be run non-interactively end to end.
cat > phi-2 <<'EOF'
#!/bin/bash
llamafile.exe -m phi-2.Q4_K_M.gguf
EOF
chmod +x phi-2

# Move the launcher into the directory on $PATH.
sudo mv phi-2 /usr/sbin/llm/phi-2

# With llamafile.exe and the phi-2 script both on $PATH, launch the model by
# typing:
#   phi-2
# This spins up a locally hosted server running 100% off of your CPU.
# (Left as a comment here because it blocks the terminal while serving.)

### Troubleshooting huggingface-cli:
### If you recently installed huggingface-hub and get an error when running
### huggingface-cli, try the steps below.
### Make sure pip itself is installed:
###   sudo apt install python3-pip
### pip3 user-installed executables land in ~/.local/bin, so add it to PATH:
export PATH="$HOME/.local/bin:$PATH"
### then attempt the huggingface-cli download again.

# Another way to download weights is with curl:
curl -L -o mistral.gguf https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf