also clear cuda cache on error

wea_ondara
2024-04-18 15:51:28 +02:00
parent 7205cf5164
commit 30cce3842e


@@ -26,6 +26,7 @@ class ChatQwen:
         print('Loaded')

     def generate(self, messages):
+        try:
             # prepare
             messages = [m for m in messages if m['role'] != 'system']
             input_messages = [self.default_instruction] + messages
@@ -33,7 +34,7 @@ class ChatQwen:
             # generate
             text = self.tokenizer.apply_chat_template(input_messages, tokenize=False, add_generation_prompt=True)
             model_inputs = self.tokenizer([text], return_tensors='pt').to(self.default_device)
-            generated_ids = self.model.generate(model_inputs.input_ids, max_new_tokens=100)
+            generated_ids = self.model.generate(model_inputs.input_ids, max_new_tokens=300)
             generated_ids = [
                 output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
             ]
@@ -43,8 +44,9 @@ class ChatQwen:
             messages.append({'role': 'assistant', 'content': response})
             self.record_conversation(input_messages, {'role': 'assistant', 'content': response})
-            torch.cuda.empty_cache()  # clear cache or the gpu mem will be used a lot
             return messages
+        finally:
+            torch.cuda.empty_cache()  # clear cache or the gpu mem will be used a lot

     def record_conversation(self, messages, response):
         messages = messages + [response]
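
For context, a minimal sketch of how ChatQwen.generate reads after this commit. The diff hides the lines between the second and third hunk (the decode step that produces response), so that part is a standard transformers-style reconstruction rather than the author's exact code; the attribute names (self.tokenizer, self.model, self.default_device, self.default_instruction) all come from the diff itself.

    import torch

    class ChatQwen:
        def generate(self, messages):
            try:
                # prepare: drop system messages, prepend the default instruction
                messages = [m for m in messages if m['role'] != 'system']
                input_messages = [self.default_instruction] + messages

                # generate: render the chat template, tokenize, and sample up to
                # 300 new tokens (raised from 100 in this commit)
                text = self.tokenizer.apply_chat_template(
                    input_messages, tokenize=False, add_generation_prompt=True)
                model_inputs = self.tokenizer([text], return_tensors='pt').to(self.default_device)
                generated_ids = self.model.generate(model_inputs.input_ids, max_new_tokens=300)

                # keep only the newly generated ids (strip the prompt tokens)
                generated_ids = [
                    output_ids[len(input_ids):]
                    for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
                ]
                # assumed decode step -- not visible in the diff
                response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

                messages.append({'role': 'assistant', 'content': response})
                self.record_conversation(input_messages, {'role': 'assistant', 'content': response})
                return messages
            finally:
                # runs on success *and* on error (e.g. CUDA OOM mid-generation),
                # so the allocator's cached blocks are always released
                torch.cuda.empty_cache()

The point of the change: the finally block executes whether generate returns normally or raises, so the cache is cleared even when generation fails partway through, whereas the previous placement before return only ran on the success path. Note that torch.cuda.empty_cache() releases unused cached blocks back to the driver but does not free tensors that are still referenced.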