67 lines
2.2 KiB
Python
67 lines
2.2 KiB
Python
import os
|
|
import re
|
|
import asyncio
|
|
import emoji
|
|
import aiogoogletrans
|
|
|
|
from concurrent.futures import ThreadPoolExecutor
|
|
from dotenv import find_dotenv, load_dotenv
|
|
from ollama import generate
|
|
|
|
from Craft.module.ml.prompt import generate_system, generate_user, generate_system_eng, generate_user_eng
|
|
from Craft.module.ml.util import extract_emoji_and_text
|
|
load_dotenv(find_dotenv())
|
|
|
|
translator = aiogoogletrans.Translator()
|
|
|
|
class Engine:
    """Two-stage text/emoji generator backed by local Ollama models.

    Pipeline (see :meth:`generate`):
      1. ``_generate_eng`` — async call to an English model
         (``mistral:latest``) to produce an intermediate English result.
      2. ``_generate`` — blocking call to the fine-tuned model
         (``infcraft:minial``), fed the English result as part of its
         system prompt; executed in a thread pool so it does not block
         the event loop.
    """

    def __init__(self):
        # Placeholder attribute; not read by any visible method.
        # Kept for backward compatibility with external callers.
        self.key = None

    def _generate(self, first_word: str, second_word: str, eng_result: str):
        """Run the fine-tuned model synchronously (blocking call).

        Returns the raw ollama response mapping; the caller extracts
        ``['response']``. (The original ``-> str`` annotation was
        misleading — the full response object is returned.)
        """
        gen_data = generate(
            model="infcraft:minial",
            system=generate_system(first_word, second_word, eng_result),
            prompt=generate_user(first_word, second_word),
            keep_alive=60 * 60 * 24,  # keep the model loaded for 24h
            context=None,
            options={
                "seed": 0,            # deterministic sampling
                "temperature": 0.2,
                "top_p": 0.85,
                # NOTE(review): top_k is conventionally an integer count of
                # candidate tokens; 0.1 looks like a mistake (top_p value?).
                # Left unchanged to preserve current model behavior — verify.
                "top_k": 0.1,
                "max_tokens": 32,
                "main_gpu": 0,
            },
        )
        # Caller extracts only the emoji from gen_data['response'].
        return gen_data

    async def _generate_eng(self, first_word: str, second_word: str):
        """Run the English-language model and return its raw response mapping.

        The prompt helpers are async in the English variant, hence the awaits.
        """
        gen_data = generate(
            model="mistral:latest",
            system=await generate_system_eng(first_word, second_word),
            prompt=await generate_user_eng(first_word, second_word),
            keep_alive=60 * 60 * 24,  # keep the model loaded for 24h
            options={
                "seed": 0,
                "temperature": 0.2,
                "top_p": 1,
                # NOTE(review): same suspicious float top_k as _generate — verify.
                "top_k": 0.1,
                "max_tokens": 64,
                "main_gpu": 0,
            },
        )
        return gen_data

    async def generate(self, first_word: str, second_word: str):
        """Produce the final generation result for the two input words.

        Runs the English stage first, then feeds its ``response`` text into
        the blocking fine-tuned stage via a thread-pool executor so the
        event loop stays responsive. Returns the raw ollama response
        mapping from the second stage.
        """
        eng_result = await self._generate_eng(first_word, second_word)
        # get_running_loop() is the correct (non-deprecated) call inside a
        # coroutine; get_event_loop() emits a DeprecationWarning here.
        loop = asyncio.get_running_loop()
        with ThreadPoolExecutor() as executor:
            data = await loop.run_in_executor(
                executor,
                self._generate,
                first_word,
                second_word,
                eng_result['response'],
            )
        return data
|
|
|
|
|
|
if __name__ == "__main__":
    # Demo entry point: run one generation and print the extracted
    # emoji/text. Guarded so importing this module does not trigger a
    # model call as a side effect.
    data = asyncio.run(Engine().generate("👮 윤석열", "👮 이재명"))
    print(extract_emoji_and_text(data['response']))