
Commit

Rename
StartHua committed Jan 20, 2024
1 parent 9bf090d commit bd500a3
Showing 9 changed files with 178 additions and 94 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -0,0 +1 @@
AssetKey.json
41 changes: 14 additions & 27 deletions CXH_ALY_Seg_Cloth.py → ALY_Seg_Cloth.py
@@ -4,38 +4,21 @@
from typing import List

from .AlyVision import imagese
from alibabacloud_imageseg20191230.client import Client
from alibabacloud_imageseg20191230.models import SegmentClothAdvanceRequest
from alibabacloud_tea_openapi.models import Config
from alibabacloud_tea_util.models import RuntimeOptions


import os
import io
import sys
import re
import json
import time
import torch
import psutil
import random
import datetime
import comfy.sd
import comfy.utils
import numpy as np
import folder_paths
import comfy.samplers
import latent_preview
import comfy.model_base
from pathlib import Path
import comfy.model_management
from urllib.request import urlopen
from collections import defaultdict
from PIL.PngImagePlugin import PngInfo
from PIL import Image, ImageDraw, ImageFont
from typing import Dict, List, Optional, Tuple, Union, Any
import nodes
from PIL import Image,ImageOps

from .utils import *

@@ -44,7 +27,7 @@



class CXH_ALY_Seg_Cloth:
class ALY_Seg_Cloth:

def __init__(self):
pass
@@ -54,26 +37,30 @@ def INPUT_TYPES(cls):
return {"required":
{
"cloth_type": (["None","tops", "coat","skirt","pants","bag","shoes","hat"],{"default":"None"} ),
"image_path": ("STRING",{"default": "","multiline": False}),
"image":("IMAGE", {"default": "","multiline": False}),
"return_form": (["whiteBK", "mask"],{"default":"mask"} ),
}
}

RETURN_TYPES = ("IMAGE","IMAGE")
RETURN_NAMES = ("source","class")
RETURN_NAMES = ("cloth","part")
OUTPUT_NODE = True
FUNCTION = "sample"
CATEGORY = "CXH"

def sample(self,cloth_type,image_path,return_form):

if not os.path.exists(image_path):
print("文件不存在:" + image_path)
return
im = open(image_path, 'rb')
def sample(self,cloth_type,image,return_form):

now = datetime.datetime.now()
date_str = now.strftime("%Y-%m-%d")
folder_path = os.path.join(custom_nodes_path,"Comfyui_ALY","cache",f"{date_str}.jpg")

# Temporary cache: convert the image into a buffer for the Alibaba SDK
save_tensor_image(image,folder_path)

imp1 = open(folder_path, 'rb')

segment_cloth_request = SegmentClothAdvanceRequest()
segment_cloth_request.image_urlobject =im
segment_cloth_request.image_urlobject =imp1
# Set the clothing sub-class
if cloth_type != "None":
segment_cloth_request.cloth_class = [cloth_type]
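For orientation, a minimal usage sketch of the renamed node (not part of this commit): it assumes `img` is a ComfyUI IMAGE tensor of shape [1, H, W, 3] with values in 0..1, and that sample() returns the two images declared in RETURN_TYPES.

# Hedged sketch: calling the renamed node directly, outside a ComfyUI graph.
# `img` is assumed to be a [1, H, W, 3] float tensor in 0..1 (ComfyUI IMAGE).
node = ALY_Seg_Cloth()
cloth, part = node.sample(cloth_type="tops", image=img, return_form="mask")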
77 changes: 77 additions & 0 deletions ALY_Seg_Obj.py
@@ -0,0 +1,77 @@

MAX_RESOLUTION=8192

from typing import List

from .AlyVision import imagese
from alibabacloud_imageseg20191230.models import SegmentCommodityRequest
from alibabacloud_tea_util.models import RuntimeOptions

import os
import datetime
import numpy as np
import folder_paths
import comfy.model_base
from pathlib import Path
from urllib.request import urlopen
from collections import defaultdict
from PIL.PngImagePlugin import PngInfo
from PIL import Image, ImageDraw, ImageFont
import nodes

from .utils import *

comfy_path = os.path.dirname(folder_paths.__file__)
custom_nodes_path = os.path.join(comfy_path, "custom_nodes")



class ALY_Seg_Obj:

def __init__(self):
pass

@classmethod
def INPUT_TYPES(cls):
return {"required":
{
"image":("IMAGE", {"default": "","multiline": False}),
}
}

RETURN_TYPES = ("IMAGE",)
RETURN_NAMES = ("obj",)
OUTPUT_NODE = True
FUNCTION = "sample"
CATEGORY = "CXH"

def sample(self,image):

now = datetime.datetime.now()
date_str = now.strftime("%Y-%m-%d")
folder_path = os.path.join(custom_nodes_path,"Comfyui_ALY","cache",f"{date_str}.jpg")

# Temporary cache: convert the image into a buffer for the Alibaba SDK
save_tensor_image(image,folder_path)

imp1 = open(folder_path, 'rb')

segment_commodity_request = SegmentCommodityRequest()
segment_commodity_request.image_urlobject =imp1

runtime = RuntimeOptions()
try:
# Initialize the client
client = imagese.create_client_json()
response = client.segment_commodity_with_options_async(segment_commodity_request, runtime)
print(response)
image_url = response.body.data.image_url
print(image_url)
except Exception as error:
# Print the overall error message
print(error)
# Print an individual field (error code)
print(error.code)

source_img = img_from_url(image_url)
return (source_img,)
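One caveat on the call above, offered as an assumption rather than something this commit changes: in Tea-generated Alibaba Cloud Python SDKs, the *_with_options_async methods are coroutines, so the response would normally be awaited before reading body.data.image_url. A minimal sketch:

# Hedged sketch: awaiting the async variant with asyncio (assumes the same
# client and request objects as in sample() above).
import asyncio

response = asyncio.run(
    client.segment_commodity_with_options_async(segment_commodity_request, runtime)
)
image_url = response.body.data.image_url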
2 changes: 1 addition & 1 deletion AlyVision.py
@@ -11,7 +11,7 @@

comfy_path = os.path.dirname(folder_paths.__file__)
custom_nodes_path = os.path.join(comfy_path, "custom_nodes")
mine_path = os.path.join(custom_nodes_path, "Comfyui-Mine")
mine_path = os.path.join(custom_nodes_path, "Comfyui_ALY")
key_json = os.path.join(mine_path, "AssetKey.json")

class AlyVision_imageseg:
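For context, a hedged sketch of how a client could be built from AssetKey.json at the new Comfyui_ALY path; the function name, the JSON field names, and the endpoint below are assumptions, not taken from this repository:

# Hedged sketch only: build an imageseg Client from a JSON key file.
import json
from alibabacloud_imageseg20191230.client import Client
from alibabacloud_tea_openapi.models import Config

def create_client_from_json(path):
    with open(path, "r", encoding="utf-8") as f:
        keys = json.load(f)  # assumed fields: access_key_id, access_key_secret
    config = Config(
        access_key_id=keys["access_key_id"],
        access_key_secret=keys["access_key_secret"],
        endpoint="imageseg.cn-shanghai.aliyuncs.com",  # assumed endpoint
    )
    return Client(config)

# e.g. client = create_client_from_json(key_json)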
4 changes: 0 additions & 4 deletions AssetKey.json

This file was deleted.

51 changes: 0 additions & 51 deletions CXH_IMAGE.py

This file was deleted.

6 changes: 3 additions & 3 deletions README.md
@@ -4,7 +4,7 @@

1. CXH_GPT node. Installation reference: https://www.bilibili.com/video/BV1A94y1A7Mr/

Error: ModuleNotFoundError: No module named 'gptcpp_node'. Note (see the end of the video): you actually need the comfyui_nodes node. If you do not need the GPT node, delete CXH_GPT.py and remove CXH_GPT from __init__.py.

2. CXH_IMAGE node: takes an image path and outputs an image.

@@ -13,11 +13,11 @@
Get a key and fill it into AssetKey.json.

(2) Install the dependencies from requirements.txt:
python_embeded\python.exe -m pip install -r requirements.txt

If the Alibaba libraries fail to install, you can get them from the group chat and extract them directly.

Link: https://pan.baidu.com/s/1Wt7fLMktnlwuDCrGeV7DqQ
Extraction code: 9c0f

![2da457a1d6d40ca81435c71f7f9a13f](https://github.com/StartHua/Comfyui-Mine/assets/22284244/39173f9d-629c-4766-a852-efb358c45d48)
12 changes: 6 additions & 6 deletions __init__.py
@@ -1,16 +1,16 @@
# from .mine_nodes import *
from .CXH_ALY_Seg_Cloth import *
from .CXH_IMAGE import *
from .ALY_Seg_Cloth import *
from .ALY_Seg_Obj import *

# A dictionary that contains all nodes you want to export with their names
# NOTE: names should be globally unique
NODE_CLASS_MAPPINGS = {
"CXH_ALY_Seg_Cloth":CXH_ALY_Seg_Cloth,
"CXH_IMAGE":CXH_IMAGE
"ALY_Seg_Cloth":ALY_Seg_Cloth,
"ALY_Seg_Obj":ALY_Seg_Obj
}

# A dictionary that contains the friendly/humanly readable titles for the nodes
NODE_DISPLAY_NAME_MAPPINGS = {
"CXH_ALY_Seg_Cloth":"CXH_ALY_Seg_Cloth",
"CXH_IMAGE":"CXH_IMAGE"
"ALY_Seg_Cloth":"ALY_Seg_Cloth",
"ALY_Seg_Obj":ALY_Seg_Obj
}
78 changes: 76 additions & 2 deletions utils.py
@@ -1,8 +1,43 @@
import torch
import numpy as np
import io
from PIL import Image,ImageOps
from PIL import Image,ImageOps, ImageFilter
from urllib.request import urlopen
import datetime
import os
import cv2
import requests
from io import BytesIO

# ComfyUI's native image format is already a tensor
def tensor2pil(image):
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

# Convert PIL to Tensor
def pil2tensor(image):
return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

# PIL to Mask
def pil2mask(image):
image_np = np.array(image.convert("L")).astype(np.float32) / 255.0
mask = torch.from_numpy(image_np)
return 1.0 - mask

# Save a tensor image to disk
def save_tensor_image(img,path):
pil_image = tensor2pil(img)
pil_image.save(path)

# Gaussian blur (the same blur algorithm used by Flutter's ImageFilter)
def gaussian_region(image, radius=5.0):
image = ImageOps.invert(image.convert("L"))
image = image.filter(ImageFilter.GaussianBlur(radius=int(radius)))
return image.convert("RGB")

# Download an image over the network
def download_image(url):
response = requests.get(url)
return Image.open(BytesIO(response.content)).convert("RGB")

def img_from_path(path):
img = Image.open(path)
@@ -14,4 +49,43 @@ def img_from_path(path):

def img_from_url(url):
img = io.BytesIO(urlopen(url).read())
return img_from_path(img)

def save_images(img_list, folder):
if not os.path.exists(folder):
os.makedirs(folder)
now = datetime.datetime.now()
date_str = now.strftime("%Y-%m-%d")
folder_path = os.path.join(folder, date_str)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
time_str = now.strftime("%H_%M_%S")
for idx, img in enumerate(img_list):
image_number = idx + 1
filename = f"{time_str}_{image_number}.jpg"
save_path = os.path.join(folder_path, filename)
cv2.imwrite(save_path, img[..., ::-1])


def check_channels(image):
channels = image.shape[2] if len(image.shape) == 3 else 1
if channels == 1:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
elif channels > 3:
image = image[:, :, :3]
return image


def resize_image(img, max_length=768):
height, width = img.shape[:2]
max_dimension = max(height, width)

if max_dimension > max_length:
scale_factor = max_length / max_dimension
new_width = int(round(width * scale_factor))
new_height = int(round(height * scale_factor))
new_size = (new_width, new_height)
img = cv2.resize(img, new_size)
height, width = img.shape[:2]
img = cv2.resize(img, (width-(width % 64), height-(height % 64)))
return img
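A short usage sketch for the new tensor helpers above (illustrative only; "input.jpg" is a placeholder path):

# Hedged sketch: round-trip an image through the new helpers.
from PIL import Image

pil_img = Image.open("input.jpg").convert("RGB")  # placeholder path
tensor = pil2tensor(pil_img)          # [1, H, W, 3] float32 tensor in 0..1
save_tensor_image(tensor, "out.jpg")  # tensor2pil + PIL save
mask = pil2mask(pil_img)              # inverted grayscale mask tensor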
