# 2024-03-17 00:42:36 +00:00
|
|
|
import io
|
|
|
|
import time
|
|
|
|
import PIL
|
|
|
|
import base64
|
|
|
|
import re
|
|
|
|
import random
|
|
|
|
import aiohttp
|
|
|
|
import asyncio
|
|
|
|
import sentry_sdk
|
|
|
|
from aiogoogletrans import Translator
|
|
|
|
from typing import Any, Dict
|
|
|
|
from discord import File, Asset
|
|
|
|
from RUNA.Tagging import Tagging
|
|
|
|
from RUNA.Cogs.Event import model
|
|
|
|
translator = Translator()
|
|
|
|
BLOCKTAG = [
|
|
|
|
"nsfw",
|
|
|
|
"nude",
|
|
|
|
"nipples",
|
|
|
|
"nipple",
|
|
|
|
"pussy",
|
|
|
|
"public hair",
|
|
|
|
"gay",
|
|
|
|
"lesbian",
|
|
|
|
"corpse",
|
|
|
|
"no panties",
|
|
|
|
"no panty",
|
|
|
|
"no bra",
|
|
|
|
"bra",
|
|
|
|
"panty",
|
|
|
|
"panties",
|
|
|
|
"underwear",
|
|
|
|
"undergarment",
|
|
|
|
"underpants",
|
|
|
|
"underpant",
|
|
|
|
"blowjob",
|
|
|
|
"sex",
|
|
|
|
"sexy",
|
|
|
|
"pennis"
|
|
|
|
"realistic",
|
|
|
|
"open breasts",
|
|
|
|
"breasts",
|
|
|
|
"bikini",
|
|
|
|
"swimsuit",
|
|
|
|
"give birth",
|
|
|
|
"slave"
|
|
|
|
]
|
|
|
|
|
|
|
|
weight = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.8, 0.9]
|
|
|
|
cfg = [6, 7, 8, 9, 10, 11]
|
|
|
|
#def check_curse(text: str):
|
|
|
|
# return korcen_checker.check(text)
|
|
|
|
def is_korean(string):
|
|
|
|
pattern = re.compile(r"[ㄱ-ㅎㅏ-ㅣ가-힣]")
|
|
|
|
match = pattern.search(string)
|
|
|
|
return bool(match)
|
|
|
|
|
|
|
|
async def process_prompt(prompt: str, remove: str, res: list, isnsfw: bool, style1: float, style2: float, afterprocess: bool, avatar):
|
|
|
|
tags = None
|
|
|
|
if not avatar == None:
|
|
|
|
# gif인 경우 png로 변환
|
|
|
|
try:
|
|
|
|
if avatar.url.endswith(".gif"):
|
|
|
|
avatar = avatar.replace(format="png")
|
|
|
|
avatar = await avatar.read()
|
|
|
|
# TypeError: a bytes-like object is required, not 'PngImageFile'
|
|
|
|
taging = Tagging(model=model)
|
|
|
|
tags = await taging.predict(avatar)
|
|
|
|
tags = tags[0]
|
|
|
|
except Exception as e:
|
|
|
|
tags = None
|
|
|
|
man = False
|
|
|
|
if "남자" in prompt and "여자" in remove:
|
|
|
|
man = True
|
|
|
|
if is_korean(prompt):
|
|
|
|
prompt = await translator.translate(prompt, dest="en")
|
|
|
|
prompt = prompt.text
|
|
|
|
if is_korean(remove):
|
|
|
|
remove = await translator.translate(remove, dest="en")
|
|
|
|
remove = remove.text
|
|
|
|
if isnsfw == False:
|
|
|
|
for i in BLOCKTAG:
|
|
|
|
if i in prompt:
|
|
|
|
prompt = prompt.replace(i, "")
|
|
|
|
default_negative = """(KHFB, AuroraNegative, easynegative, negative_hand-neg, verybadimagenegative_v1.3:0.8), (Worst Quality, Low Quality:1.4), border, skimpy, grayscale, multiple_girls, 3d, realistic, string, multiple hands, chinese, thick abs, chubby abs, lowres, bad anatomy, asymmetric wings, elf, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, large areolae, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, Multiple legs, multiple feet, tie, (necktie:1.5), several hands, three feet, four legs, three legs,animal ears, nsfw, exposure"""
|
|
|
|
if style1 != 0:
|
|
|
|
prompt = prompt + f"<lora:addDetail:{style1}>"
|
|
|
|
if style2 != 0:
|
|
|
|
prompt = prompt + f"<lora:光影:{style2}>"
|
|
|
|
if remove != None:
|
|
|
|
negative_prompt = default_negative + "," + remove
|
|
|
|
if man == True:
|
|
|
|
prompt = prompt + "," + "(1boy)"
|
|
|
|
negative_prompt = negative_prompt + "," + "(1girl)"
|
|
|
|
add_prompt = random.choice([True, False])
|
|
|
|
if add_prompt == True:
|
|
|
|
prompt = prompt + f"<lora:canistermix1.1:{random.choice(weight)}>"
|
|
|
|
if tags != None:
|
|
|
|
prompt = prompt + "," + tags
|
|
|
|
payloads = {}
|
|
|
|
if afterprocess == True:
|
|
|
|
payloads = {
|
|
|
|
"prompt": prompt,
|
|
|
|
"negative_prompt": negative_prompt,
|
|
|
|
"enable_hr": True,
|
|
|
|
"hr_scale": 1.5,
|
|
|
|
"hr_upscaler": "R-ESRGAN 4x+ Anime6B",
|
|
|
|
"seed": random.randint(0, 1000000000),
|
|
|
|
"steps": 25,
|
|
|
|
"cfg_scale": random.choice(cfg),
|
|
|
|
"width": res[0],
|
|
|
|
"height": res[1],
|
|
|
|
"sampler_index": "DPM++ 2M Karras",
|
|
|
|
"refiner_checkpoint": "smooREFINERV2R10_half",
|
|
|
|
"refiner_switch_at": 0.45,
|
|
|
|
"alwayson_scripts": {
|
|
|
|
"ADetailer": {
|
|
|
|
'args': [
|
|
|
|
{
|
|
|
|
'ad_model': 'face_yolov8n.pt',
|
|
|
|
'ad_inpaint_only_masked': True
|
|
|
|
}]
|
|
|
|
}}}
|
|
|
|
else:
|
|
|
|
payloads = {
|
|
|
|
"prompt": prompt,
|
|
|
|
"negative_prompt": negative_prompt,
|
|
|
|
"seed": random.randint(0, 1000000000),
|
|
|
|
"steps": 25,
|
|
|
|
"cfg_scale": random.choice(cfg),
|
|
|
|
"width": res[0],
|
|
|
|
"height": res[1],
|
|
|
|
"sampler_index": "DPM++ 2M Karras",
|
|
|
|
"refiner_checkpoint": "smooREFINERV2R10_half",
|
|
|
|
"refiner_switch_at": 0.45,
|
|
|
|
"alwayson_scripts": {
|
|
|
|
"ADetailer": {
|
|
|
|
'args': [
|
|
|
|
{
|
|
|
|
'ad_model': 'face_yolov8n.pt',
|
|
|
|
'ad_inpaint_only_masked': True
|
|
|
|
}]
|
|
|
|
}}}
|
|
|
|
return payloads
|
|
|
|
|
|
|
|
|
|
|
|
async def post_gpu_server(url: str, payload: Dict[str, Any]):
|
|
|
|
async with aiohttp.ClientSession() as session:
|
|
|
|
try:
|
|
|
|
async with session.post(url, json=payload, timeout=300) as response:
|
|
|
|
if response.status == 200:
|
|
|
|
return {"status": True, "data": await response.json()}
|
|
|
|
else:
|
|
|
|
return {"status": False, "data": None}
|
|
|
|
except Exception as e:
|
|
|
|
if isinstance(e, asyncio.TimeoutError):
|
|
|
|
return {"status": False, "data": None}
|
|
|
|
|
|
|
|
|
|
|
|
async def image_to_base64(image) -> str:
|
|
|
|
img = PIL.Image.open(image)
|
|
|
|
img = img.convert("RGB")
|
|
|
|
img_byte_arr = io.BytesIO()
|
|
|
|
img.save(img_byte_arr, format='PNG')
|
|
|
|
img_byte_arr = img_byte_arr.getvalue()
|
|
|
|
img_str = base64.b64encode(img_byte_arr).decode("utf-8")
|
|
|
|
return img_str
|
|
|
|
|
|
|
|
|
|
|
|
async def base64_to_image(base642) -> File:
|
|
|
|
attachment = File(io.BytesIO(base64.b64decode(base642)), filename="image.png")
|
|
|
|
return attachment
|
|
|
|
|
|
|
|
async def get_gpuserver_status(url) -> Dict:
|
|
|
|
async with aiohttp.ClientSession() as session:
|
|
|
|
try:
|
|
|
|
async with session.get(f"http://172.30.1.49:7860/sdapi/v1/memory", timeout=10) as response:
|
|
|
|
|
|
|
|
if response.status == 200:
|
|
|
|
result = await response.json()
|
|
|
|
memstatus = result["ram"]["used"]
|
|
|
|
cudamemstatus = result["cuda"]["system"]["used"]
|
|
|
|
oomcount = result["cuda"]["events"]["oom"]
|
|
|
|
return {"status": "online", "system_memory_usage": bytes_to_gb(memstatus), "cuda_memory_usage": bytes_to_gb(cudamemstatus), "oom_count": oomcount}
|
|
|
|
except Exception as e:
|
|
|
|
sentry_sdk.capture_exception(e)
|
|
|
|
return {"status": "offline"}
|
|
|
|
|
|
|
|
def bytes_to_gb(bytes: int) -> float:
|
|
|
|
return round(bytes / 1024 / 1024 / 1024, 2)
|
|
|
|
|
|
|
|
|
|
|
|
async def Get_Backend_latency():
|
|
|
|
start_time = time.time()
|
|
|
|
async with aiohttp.ClientSession() as client:
|
|
|
|
try:
|
|
|
|
async with client.get("http://172.30.1.49:7860/sdapi/v1/memory", timeout=10) as response:
|
|
|
|
if response.status_code == 200 or response.status_code == 404:
|
|
|
|
return round(time.time() - start_time, 2)
|
|
|
|
else:
|
|
|
|
return None
|
|
|
|
except Exception as e:
|
|
|
|
print(e)
|
2023-12-02 06:03:50 +00:00
|
|
|
return None
|