Commit

Merge pull request #21 from WolfTheDeveloper/wtd
Embed into Python
Zander Lewis authored May 10, 2024
2 parents 0096894 + 1e2a4cc commit 02a551c
Showing 4 changed files with 225 additions and 175 deletions.
175 changes: 2 additions & 173 deletions llamascript/__init__.py
@@ -1,173 +1,2 @@
import argparse
import asyncio
import logging
import os
import subprocess
import sys

import ollama

# Set up logging
logging.basicConfig(level=logging.WARNING)


class llama:
    """Interpreter for the llamascript command language."""

    def __init__(self):
self.model = ""
self.data = ""
self.system = []
self.ignore = False

    def USE(self, line):
        parts = line.split(" ")
        if parts[0] == "USE" and len(parts) > 1:
            self.model = parts[1].strip()
        else:
            raise ValueError("Invalid model")

def PROMPT(self, line="", p=""):
if p != "":
self.data = p
else:
split_line = line.split(" ", 1)
self.data = split_line[1] if len(split_line) > 1 else ""

def SYSTEM(self, line="", p=""):
if p != "":
self.system = [{"role": "system", "content": p}]
else:
split_line = line.split(" ", 1)
prompt = split_line[1] if len(split_line) > 1 else ""
self.system = [{"role": "system", "content": prompt}]

def CHAT(self, stream: bool = False):
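        # Retry up to three times, pulling the model after a failed attempt.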
for _ in range(3):
try:
response = ollama.chat(
model=self.model,
messages=self.system + [{"role": "user", "content": self.data}],
stream=stream,
)
if stream:
for message in response:
print(message["message"]["content"], end="")
print()
else:
print(response["message"]["content"])
break
except Exception as e:
logging.error("Error using model: %s", e)
print("Model not loaded. Trying to load model...")
ollama.pull(self.model)
print("Model loaded. Trying again...")
else:
raise ValueError(
"Model does not exist or could not be loaded. Please try again."
)

def INPUT(self, command):
if command == "SYSTEM":
self.SYSTEM(p=input("Enter system prompt: "))
elif command == "PROMPT":
self.PROMPT(p=input("Enter prompt: "))
else:
raise ValueError("Invalid command for INPUT")

def CREATE_MODEL(self, filename, parameters, model_name):
try:
with open(filename, "w") as file:
file.write(
f'FROM {parameters["model"]}\nPARAMETER temperature {parameters["temperature"]}\nSYSTEM """\n{parameters["system_message"]}\n"""\n'
)
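            # The Modelfile written above has this shape (illustrative values):
            #   FROM llama3
            #   PARAMETER temperature 0.7
            #   SYSTEM """
            #   <system message>
            #   """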
print(f"Modelfile created.")
command = ["ollama", "create", model_name, "-f", "./Modelfile"]
process = subprocess.Popen(
command,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
creationflags=subprocess.CREATE_NO_WINDOW,
)
stdout, stderr = process.communicate()
print("Model created.")

if process.returncode != 0:
if stderr is not None:
print(f"Error executing command: {stderr.decode()}")
else:
if stdout is not None:
print(stdout.decode())
print("Removing Modelfile...")
os.remove(filename)

except Exception as e:
logging.error("Error creating model file: %s", e)
print(f"Error creating model file {filename}.")

async def read(self, filename):
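        # Interpret the script line by line, dispatching on each line's first token.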
try:
with open(filename, "r") as file:
for line in file:
line = line.strip()
if not line:
continue
command = line.split(" ")
if command[0] == "IGNORE":
self.ignore = True
elif command[0] == "USE":
self.USE(line)
elif len(command) > 1 and command[1] == "INPUT":
self.INPUT(command[0])
elif command[0] == "SYSTEM":
self.SYSTEM(line=line)
elif command[0] == "PROMPT":
self.PROMPT(line=line)
elif command[0] == "SAVE":
if len(command) < 2:
logging.error("No filename provided")
print("No filename provided")
sys.exit(1)
model_name = command[1]
parameters = {
"model": self.model,
"temperature": command[2] if len(command) > 2 else 0.7,
"system_message": self.system[0]["content"],
}
self.CREATE_MODEL("Modelfile", parameters, model_name)
elif command[0] == "CHAT":
                        stream = len(command) > 1 and command[1] == "STREAM"
if not self.ignore:
                            print(
                                '=================\nThanks for using llama, a no-code AI chatbot. Please ensure Ollama (https://ollama.com) is running. To get started, type "USE" followed by the model you want to use, then "PROMPT" followed by your prompt, and finally "CHAT" to chat with the AI. To run a script, use the "llamascript" command. To suppress this message, add "IGNORE" to the beginning of your llama file.\n================='
                            )
self.ignore = True
self.CHAT(stream=stream)
else:
raise ValueError("Invalid command")
except FileNotFoundError:
logging.error("File %s not found.", filename)
print(f"File {filename} not found.")


def run():
parser = argparse.ArgumentParser(description="Run llama script.")
parser.add_argument("file_name", type=str, help="The name of the file to run")

args = parser.parse_args()

    if not (args.file_name.endswith(".llama") or args.file_name == "llama"):
logging.error("Invalid file type. Please provide a .llama or llama file.")
print("Invalid file type. Please provide a .llama or llama file.")
sys.exit(1)

try:
l = llama()
asyncio.run(l.read(args.file_name))
except KeyboardInterrupt:
pass


if __name__ == "__main__":
run()
from .lang import *
from .embedded import *
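
Taken together, read() above defines the whole llamascript DSL: USE <model>, SYSTEM <prompt>, PROMPT <prompt>, SYSTEM INPUT / PROMPT INPUT, CHAT (optionally CHAT STREAM), SAVE <name> [temperature], and IGNORE. After this commit that interpreter lives in lang.py (one of the four changed files, not shown on this page) and is re-exported through __init__.py. A minimal sketch of driving it from Python; the model name "llama3" and a locally running Ollama server are assumptions, not part of the commit:

# demo.py -- hypothetical usage sketch; assumes Ollama is running locally
# and that a model named "llama3" has been pulled (both assumptions).
import asyncio

from llamascript import llama  # the interpreter, re-exported by __init__.py

SCRIPT = """IGNORE
USE llama3
SYSTEM You are a terse assistant.
PROMPT Summarize what llamascript does in one sentence.
CHAT
"""

with open("demo.llama", "w") as f:
    f.write(SCRIPT)

asyncio.run(llama().read("demo.llama"))  # executes each command in order

The same script also runs from the shell via the entry point defined in run(): llamascript demo.llama.
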
48 changes: 48 additions & 0 deletions llamascript/embedded.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
from .lang import llama

class LlamaScriptRunner:
    """Thin Python wrapper around the llamascript interpreter in lang.py."""

    def __init__(self):
        self.llama = llama()

def use(self, model):
try:
self.llama.USE(f"USE {model}")
return True
except ValueError:
return False

def prompt(self, prompt):
self.llama.PROMPT(f"PROMPT {prompt}")
return True

def system(self, system_prompt):
self.llama.SYSTEM(f"SYSTEM {system_prompt}")
return True

def chat(self, stream=False):
try:
self.llama.CHAT(stream)
return True
except ValueError:
return False

def input(self, command):
try:
self.llama.INPUT(command)
return True
except ValueError:
return False

def create_model(self, filename, parameters, model_name):
try:
self.llama.CREATE_MODEL(filename, parameters, model_name)
return True
except Exception:
return False

async def read(self, filename):
try:
await self.llama.read(filename)
return True
except FileNotFoundError:
return False
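
The new embedded.py is the point of the commit: it wraps the interpreter in a plain-Python API that returns booleans instead of raising, so a host application can drive a chat without writing a .llama file. A minimal sketch, again assuming a running Ollama server and a pulled "llama3" model (both assumptions):

# hypothetical usage sketch; the model name "llama3" is an assumption
from llamascript import LlamaScriptRunner  # re-exported by __init__.py

runner = LlamaScriptRunner()
if runner.use("llama3"):
    runner.system("You are a terse assistant.")
    runner.prompt("What does embedding llamascript into Python enable?")
    runner.chat(stream=True)  # stream tokens to stdout as they arrive
else:
    print("Could not select the model.")

The boolean returns trade Python's exception idiom for simple checks in host code; callers that need the underlying errors can use runner.llama directly.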