added backend which maintains AI instances;

added frontend as control panel
This commit is contained in:
wea_ondara
2024-04-24 19:40:03 +02:00
parent 9cafd8b1e9
commit 41474d474c
79 changed files with 9022 additions and 0 deletions

backend/ai/ais/AiBase.py Normal file

@@ -0,0 +1,51 @@
import datetime
import gc
import json
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


class AiBase:
    model_id_or_path: str | None = None
    model = None
    tokenizer = None

    def __init__(self, model_id_or_path: str | None = None):
        print('Loading ' + model_id_or_path)
        self.model_id_or_path = model_id_or_path
        self.model = AutoModelForCausalLM.from_pretrained(model_id_or_path, torch_dtype='auto', device_map='auto')
        self.tokenizer = AutoTokenizer.from_pretrained(model_id_or_path)
        print('Loaded')

    def generate(self, messages):
        # overridden by concrete model classes; the base returns no reply
        return []

    def record_conversation(self, messages, response):
        # append the response and dump the full conversation as a timestamped JSON file
        messages = messages + [response]
        this_dir = os.path.dirname(os.path.abspath(__file__))
        conversations_folder = this_dir + '/../../../conversations'
        folder = conversations_folder + '/' + self.model_id_or_path.replace('/', '_')
        self._mkdir(conversations_folder)
        self._mkdir(folder)
        timestamp = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
        json_filename = folder + '/' + timestamp + '.json'
        with open(json_filename, 'w') as file:
            json.dump(messages, file)

    def _mkdir(self, path):
        if not os.path.isdir(path):
            os.mkdir(path)

    def __del__(self):
        # release the model and tokenizer, then free cached GPU memory
        del self.model
        del self.tokenizer
        gc.collect()
        torch.cuda.empty_cache()
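
record_conversation() stores each finished exchange under a conversations folder three directory levels above this file (i.e. at the repository root), in a per-model subfolder named after the model id with '/' replaced by '_', as a UTC-timestamped JSON file. A hypothetical example of the resulting path and contents (the user and assistant messages are illustrative, not taken from the commit):

conversations/Qwen_Qwen1.5-1.8B-Chat/20240424174003.json

[{"role": "system", "name": "system", "content": "Your name is \"Laura\". You are an AI created by Alice."},
 {"role": "user", "name": "user", "content": "Hello!"},
 {"role": "assistant", "name": "assistant", "content": "Hi, I am Laura."}]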

backend/ai/ais/QwenAi.py Normal file

@@ -0,0 +1,38 @@
import torch

from .AiBase import AiBase


class QwenAi(AiBase):
    default_device = 'cuda'  # the device the tokenized inputs are moved to
    default_model_id = 'Qwen/Qwen1.5-1.8B-Chat'
    default_instruction = {'role': 'system',
                           'name': 'system',
                           'content': 'Your name is "Laura". You are an AI created by Alice.'}

    def __init__(self, model_id_or_path=default_model_id):
        super().__init__(model_id_or_path)

    def generate(self, messages):
        try:
            # prepare: drop incoming system messages and prepend the default instruction
            messages = [m for m in messages if m['role'] != 'system']
            input_messages = [self.default_instruction] + messages
            # generate
            text = self.tokenizer.apply_chat_template(input_messages, tokenize=False, add_generation_prompt=True)
            model_inputs = self.tokenizer([text], return_tensors='pt').to(self.default_device)
            generated_ids = self.model.generate(model_inputs.input_ids, max_new_tokens=300)
            # keep only the newly generated tokens, stripping the prompt
            generated_ids = [
                output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
            ]
            response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
            # add the response and save the conversation
            response_entry = {'role': 'assistant', 'name': 'assistant', 'content': response}
            messages.append(response_entry)
            self.record_conversation(input_messages, response_entry)
            return messages
        finally:
            torch.cuda.empty_cache()  # free cached GPU memory, which otherwise keeps accumulating
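
A minimal usage sketch for the class above (hypothetical driver code, not part of the commit; the import path is assumed from the file layout, and a CUDA device is required since generate() moves the inputs to default_device):

from backend.ai.ais.QwenAi import QwenAi

ai = QwenAi()  # downloads/loads Qwen/Qwen1.5-1.8B-Chat
conversation = ai.generate([{'role': 'user', 'name': 'user', 'content': 'Who are you?'}])
print(conversation[-1]['content'])  # the assistant's reply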
