Now only one small change is needed: take the last "reference_path" in json_input_v2 and use it as the "reference_path" value that the user passes in through the main input.
For example:
[
    {
        "reference_path": "/root/onethingai-tmp/comfyui-tmp/哈吉斯第五批/男上衣/1合集"
    },
    {
        "reference_path": "/root/onethingai-tmp/comfyui-tmp/哈吉斯第五批/男上衣/2合集"
    },
    {
        "reference_path": "/root/onethingai-tmp/comfyui-tmp/哈吉斯第五批/男上衣/3合集"
    }
]
Take out the last entry, "reference_path": "/root/onethingai-tmp/comfyui-tmp/哈吉斯第五批/男上衣/3合集", and use it to replace the main input's value, then execute the rest.
That is, only the following are executed:
[
    {
        "reference_path": "/root/onethingai-tmp/comfyui-tmp/哈吉斯第五批/男上衣/1合集"
    },
    {
        "reference_path": "/root/onethingai-tmp/comfyui-tmp/哈吉斯第五批/男上衣/2合集"
    }
]
Return the complete corrected code to me; do not optimize anything else, as it already works well. Do not omit anything, stay consistent with the previous code, and do not make any extra modifications.
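A minimal sketch of the requested split, assuming json_input_v2 parses to a list of objects that each carry a "reference_path" key; the helper name split_reference_paths is illustrative and not part of the existing node:

import json

def split_reference_paths(json_input_v2: str, fallback: str):
    """Illustrative only: the last reference_path becomes the main input,
    the earlier entries are the ones to run through simulated execution."""
    try:
        items = json.loads(json_input_v2) if json_input_v2 else []
    except Exception:
        items = []
    paths = [it["reference_path"] for it in items
             if isinstance(it, dict) and "reference_path" in it]
    if not paths:
        # No usable JSON input: keep the original main value, nothing to simulate.
        return fallback, []
    # Last entry replaces the main input; the rest are only simulated.
    return paths[-1], paths[:-1]

With the three-entry example above, this would return the .../3合集 path as the main reference_path and leave the 1合集 and 2合集 entries for simulated execution.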
import os
import json
import random
import re
import itertools
from collections import defaultdict
from typing import List, Tuple, Dict, Set
class PatnMatchingNode:
def __init__(self):
self.all_notes = [] # 存储所有可用note路径
self.used_notes = set()
self.specified_notes = defaultdict(list) # 存储指定参考衣服的网红note路径
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"reference_path": ("STRING", {
"default": "",
"tooltip": "参考衣服根目录路径(包含各类服装的文件夹)"
}),
"target_path": ("STRING", {
"default": "",
"tooltip": "网红照片根目录路径(包含各类网红照片的文件夹)"
}),
"seed": ("INT", {
"default": 0,
"min": 0,
"max": 0xffffffffffffffff,
"tooltip": "随机种子值(控制网红文件夹选择的随机性)"
}),
"batch_count": ("INT", {
"default": 1,
"min": 1,
"max": 100,
"tooltip": "批量处理次数(重复处理参考路径的次数)"
}),
"multiplier": ("INT", {
"default": 1,
"min": 1,
"max": 10,
"tooltip": "路径扩展倍数(最终输出的图像路径的复制倍数)"
}),
"X_add": ("INT", {
"default": 0,
"min": 0,
"max": 100,
"tooltip": "X值补偿(调整(括号内数字)_额外增加的组数)"
}),
"note_limit": ("INT", {
"default": 3,
"min": 0,
"max": 100,
"tooltip": "网红使用限制(每个网红最多使用的note数量)"
}),
"auto_add_suffix": ("BOOLEAN", {
"default": False,
"tooltip": "是否自动给note文件夹增加'已使用'后缀",
"button": True
}),
"filter_used_notes": ("BOOLEAN", {
"default": False,
"tooltip": "是否过滤已使用note(包括后缀标记和JSON记录)",
"button": True
}),
"reset_used_tags": ("BOOLEAN", {
"default": False,
"tooltip": "是否剔除所有已使用标签(移除后缀并重置记录,优先级最高)",
"button": True
}),
},
"optional": {
"used_json": ("STRING", {
"default": "",
"tooltip": "已使用记录(JSON格式保存的已使用网红路径)"
}),
"json_input_v2": ("STRING", {
"default": "",
"tooltip": "JSON格式输入,包含多个reference_path参数"
}),
}
}
RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING")
RETURN_NAMES = ("参考衣服路径", "目标图像路径", "路径数量", "已使用_Json", "json_input_v2_output")
FUNCTION = "match_paths"
CATEGORY = "Sanmi Nodes/Customized Nodes"
def normalize_path(self, path: str) -> str:
"""规范化路径,去除冗余部分"""
return os.path.normpath(path)
def has_reference_brackets(self, folder_name: str) -> bool:
"""检查参考衣服文件夹名是否包含括号"""
return '(' in folder_name or '(' in folder_name
def load_used_data(self, json_str: str) -> set:
"""从JSON字符串加载已使用的note路径"""
if not json_str:
return set()
try:
data = json.loads(json_str)
return {self.normalize_path(p) for p in data.get("used_notes", [])}
        except Exception:
return set()
def extract_ref_name(self, folder_name: str) -> str:
"""提取参考衣服名字"""
# 剔除开头数字和下划线
without_prefix = re.sub(r'^\d+_', '', folder_name)
# 提取括号前的内容
ref_name = re.split(r'[((]', without_prefix, 1)[0]
return ref_name
def extract_influencer_ref(self, influencer_name: str) -> Tuple[str, str]:
"""提取网红名和指定的参考衣服名"""
parts = influencer_name.rsplit('_', 1)
if len(parts) == 2:
return parts[0], parts[1]
return influencer_name, None
def collect_notes(self, target_root: str, existing_used: set, filter_used_notes: bool):
"""收集未被使用的note路径,分离出指定参考衣服的网红"""
self.all_notes = [] # 普通网红note路径
self.specified_notes = defaultdict(list) # 重置指定参考衣服的网红
# 遍历所有网红文件夹
for influencer in os.listdir(target_root):
influencer_path = self.normalize_path(os.path.join(target_root, influencer))
if not os.path.isdir(influencer_path):
continue
# 提取网红名和可能的指定参考衣服名
original_name, ref_spec = self.extract_influencer_ref(influencer)
# 遍历该网红下的note文件夹
for note in os.listdir(influencer_path):
note_path = self.normalize_path(os.path.join(influencer_path, note))
if not os.path.isdir(note_path):
continue
# 检查是否已使用(根据后缀判断)
is_used_by_suffix = note.endswith('已使用')
# 如果启用了过滤已使用note,并且note已被使用(通过后缀或JSON记录),则跳过
if filter_used_notes and (is_used_by_suffix or note_path in existing_used):
continue
# 如果网红名包含指定参考衣服,则加入specified_notes
if ref_spec:
self.specified_notes[ref_spec].append(note_path)
else:
self.all_notes.append(note_path)
def select_notes(self, candidate_list: List[str], x: int, note_limit: int,
existing_counts: Dict[str, int], current_run_counts: Dict[str, int],
target_path: str) -> Tuple[List[str], int]:
"""
从候选列表中挑选note路径
candidate_list: 候选note路径列表
x: 需要选择的note数量
"""
selected = []
missing = 0
remaining = x
# 按网红分组并过滤可用note
influencer_notes = defaultdict(list)
for note_path in candidate_list:
# 已使用的跳过
if note_path in self.used_notes:
continue
# 获取网红名称
rel_path = os.path.relpath(note_path, target_path)
influencer = rel_path.split(os.sep)[0]
# 检查使用限制
total_used = existing_counts.get(influencer, 0) + current_run_counts.get(influencer, 0)
if total_used < note_limit:
influencer_notes[influencer].append(note_path)
# 多轮选择直到满足需求或无法继续
while remaining > 0:
candidates = list(influencer_notes.keys())
if not candidates:
missing = remaining
break
random.shuffle(candidates)
added_this_round = 0
for influencer in candidates:
if not influencer_notes[influencer]:
continue
# 选择该网红的一个note
note_path = influencer_notes[influencer].pop(0)
selected.append(note_path)
self.used_notes.add(note_path)
# 更新计数
current_run_counts[influencer] = current_run_counts.get(influencer, 0) + 1
remaining -= 1
added_this_round += 1
# 检查是否达到使用限制
total_used = existing_counts.get(influencer, 0) + current_run_counts[influencer]
if total_used >= note_limit:
del influencer_notes[influencer]
if remaining <= 0:
break
if added_this_round == 0:
missing = remaining
break
return selected, missing
def process_images(self, note_path: str, auto_add_suffix: bool) -> List[str]:
"""处理单个note图像,如果需要则添加已使用后缀"""
images = []
# 如果需要自动添加后缀,则重命名文件夹
if auto_add_suffix and not note_path.endswith('已使用'):
parent_dir = os.path.dirname(note_path)
base_name = os.path.basename(note_path)
new_note_path = os.path.join(parent_dir, f"{base_name}_已使用")
try:
os.rename(note_path, new_note_path)
note_path = new_note_path # 更新为新的路径
except OSError as e:
print(f"重命名文件夹失败: {e}")
# 收集图像文件
for fname in os.listdir(note_path):
            if fname.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')):
images.append(self.normalize_path(os.path.join(note_path, fname)))
return sorted(images)
def reset_used_tags(self, target_path: str):
"""重置所有已使用标签,移除后缀并清空记录"""
# 遍历所有网红文件夹
for influencer in os.listdir(target_path):
influencer_path = self.normalize_path(os.path.join(target_path, influencer))
if not os.path.isdir(influencer_path):
continue
# 遍历该网红下的note文件夹
for note in os.listdir(influencer_path):
note_path = self.normalize_path(os.path.join(influencer_path, note))
if not os.path.isdir(note_path):
continue
# 检查是否包含"已使用"后缀
if '已使用' in note:
# 移除后缀
new_note_name = note.replace('_已使用', '')
new_note_path = self.normalize_path(os.path.join(influencer_path, new_note_name))
try:
os.rename(note_path, new_note_path)
except OSError as e:
print(f"重置文件夹名称失败: {e}")
def simulate_execution(self, reference_path: str, target_path: str, seed: int,
batch_count: int, multiplier: int, X_add: int, note_limit: int,
used_json: str = "") -> str:
"""模拟执行,用于累积used_json"""
random.seed(seed)
# 加载历史使用数据
existing_used = self.load_used_data(used_json)
self.used_notes = existing_used.copy()
# 统计历史使用次数
existing_counts = defaultdict(int)
for note_path in existing_used:
rel_path = os.path.relpath(note_path, target_path)
influencer = rel_path.split(os.sep)[0]
existing_counts[influencer] += 1
# 收集可用note (分离普通网红和指定参考衣服的网红)
# 模拟执行时filter_used_notes默认为True
self.collect_notes(target_path, existing_used, True)
# 准备候选列表(普通网红)
available_notes = self.all_notes.copy()
current_run_counts = defaultdict(int)
# 开始批量处理
for _ in range(batch_count):
clothing_folders = [
self.normalize_path(os.path.join(reference_path, f))
for f in os.listdir(reference_path)
if os.path.isdir(self.normalize_path(os.path.join(reference_path, f)))
]
for cloth_folder in clothing_folders:
folder_name = os.path.basename(cloth_folder)
# 关键修改:跳过没有括号的参考衣服
if not self.has_reference_brackets(folder_name):
continue # 完全跳过不处理
ref_name = self.extract_ref_name(folder_name)
x = max(0, self.extract_group_number(folder_name, X_add))
selected_notes = []
# 步骤1: 从指定参考衣服的网红中挑选
if ref_name in self.specified_notes:
spec_candidates = self.specified_notes[ref_name].copy()
selected, missing = self.select_notes(
spec_candidates, x, note_limit,
existing_counts, current_run_counts, target_path
)
selected_notes.extend(selected)
x -= len(selected)
# 步骤2: 如果数量不足,从普通网红中挑选剩余数量
if x > 0:
selected, missing = self.select_notes(
available_notes, x, note_limit,
existing_counts, current_run_counts, target_path
)
selected_notes.extend(selected)
# 处理目标图像(模拟执行时不实际处理图像,只记录使用的note)
for note in selected_notes:
self.process_images(note, False) # 模拟执行时auto_add_suffix为False
# 生成使用记录
return json.dumps({"used_notes": list(self.used_notes)}, ensure_ascii=False, indent=2)
def match_paths(self, reference_path: str, target_path: str, seed: int,
batch_count: int, multiplier: int, X_add: int, note_limit: int,
auto_add_suffix: bool, filter_used_notes: bool, reset_used_tags: bool,
used_json: str = "", json_input_v2: str = "") -> Tuple[str, str, str, str, str]:
"""主函数:匹配参考衣服和网红照片路径"""
# 处理json_input_v2模拟执行
all_reference_paths = []
cumulative_used_json = used_json
if json_input_v2:
try:
input_data = json.loads(json_input_v2)
for item in input_data:
if "reference_path" in item:
all_reference_paths.append(item["reference_path"])
# 模拟执行每个reference_path
cumulative_used_json = self.simulate_execution(
reference_path=item["reference_path"],
target_path=target_path,
seed=seed,
batch_count=batch_count,
multiplier=multiplier,
X_add=X_add,
note_limit=note_limit,
used_json=cumulative_used_json
)
except Exception as e:
print(f"解析json_input_v2时出错: {e}")
# 添加当前实际执行的reference_path
all_reference_paths.append(reference_path)
# 生成json_input_v2格式的输出
json_output = json.dumps([{"reference_path": path} for path in all_reference_paths],
ensure_ascii=False, indent=2)
# 实际执行
random.seed(seed)
total_missing = 0
# 如果启用重置标签,先执行重置操作
if reset_used_tags:
self.reset_used_tags(target_path)
existing_used = set()
else:
# 加载历史使用数据(包括模拟执行累积的)
existing_used = self.load_used_data(cumulative_used_json)
self.used_notes = existing_used.copy()
# 统计历史使用次数
existing_counts = defaultdict(int)
for note_path in existing_used:
rel_path = os.path.relpath(note_path, target_path)
influencer = rel_path.split(os.sep)[0]
existing_counts[influencer] += 1
# 收集可用note (分离普通网红和指定参考衣服的网红)
self.collect_notes(target_path, existing_used, filter_used_notes)
# 准备候选列表(普通网红)
available_notes = self.all_notes.copy()
current_run_counts = defaultdict(int)
raw_clothes, raw_targets = [], []
# 开始批量处理
for _ in range(batch_count):
clothing_folders = [
self.normalize_path(os.path.join(reference_path, f))
for f in os.listdir(reference_path)
if os.path.isdir(self.normalize_path(os.path.join(reference_path, f)))
]
for cloth_folder in clothing_folders:
folder_name = os.path.basename(cloth_folder)
# 关键修改:跳过没有括号的参考衣服
if not self.has_reference_brackets(folder_name):
continue # 完全跳过不处理
ref_name = self.extract_ref_name(folder_name)
x = max(0, self.extract_group_number(folder_name, X_add))
                selected_notes = []
                missing = 0  # ensure 'missing' is defined even when no selection step runs below
# 步骤1: 从指定参考衣服的网红中挑选
if ref_name in self.specified_notes:
spec_candidates = self.specified_notes[ref_name].copy()
selected, missing = self.select_notes(
spec_candidates, x, note_limit,
existing_counts, current_run_counts, target_path
)
selected_notes.extend(selected)
x -= len(selected)
# 步骤2: 如果数量不足,从普通网红中挑选剩余数量
if x > 0:
selected, missing = self.select_notes(
available_notes, x, note_limit,
existing_counts, current_run_counts, target_path
)
selected_notes.extend(selected)
total_missing += missing
# 处理目标图像
target_images = list(itertools.chain.from_iterable(
self.process_images(note, auto_add_suffix) for note in selected_notes
))
# 生成参考路径
clothes = []
for img_path in target_images:
fname = os.path.basename(img_path).lower()
# 确定基础文件名
base_name = (
'左侧半' if 'lleft' in fname else # 新增左侧半
'右侧半' if 'rright' in fname else # 新增右侧半
'左侧' if 'left' in fname else
'右侧' if 'right' in fname else
'背面' if 'back' in fname else
'正面'
)
# 尝试两种图像格式
for ext in ['.jpg', '.png']:
cloth = self.normalize_path(os.path.join(cloth_folder, base_name + ext))
if os.path.exists(cloth):
clothes.append(cloth)
break
raw_clothes.extend(clothes)
raw_targets.extend(target_images)
# 应用倍数因子
clothes_paths = [p for p in raw_clothes for _ in range(multiplier)]
target_paths = [p for p in raw_targets for _ in range(multiplier)]
final_count = len(clothes_paths)
# 构建路径数量信息
count_info = []
if total_missing > 0:
count_info.append(f"网红note数量不足,无法满足所有的参考衣服,缺少了{total_missing}个note。")
count_info.append(f"{final_count}")
# 生成使用记录(包含模拟执行和实际执行的所有记录)
new_used_json = json.dumps({"used_notes": list(self.used_notes)},
ensure_ascii=False, indent=2)
return (
"\n".join(clothes_paths),
"\n".join(target_paths),
"\n".join(count_info),
new_used_json,
json_output
)
def extract_group_number(self, folder_name: str, X_add: int) -> int:
"""提取文件夹编码差值"""
# 尝试匹配括号内的数字
bracket_match = re.search(r'[((].*?(\d+).*?[))]', folder_name)
if not bracket_match:
return 0 # 没有括号内数字,返回0
# 只匹配"_"前的数值
underscore_match = re.search(r'^(\d+)_', folder_name)
base_num = 0
if underscore_match:
base_num = int(underscore_match.group(1))
# 提取括号内数字
bracket_num = int(bracket_match.group(1))
return bracket_num - base_num + X_add
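
For illustration only, a quick standalone check of how extract_group_number derives the group count; the folder name "3_shirt(5)" below is hypothetical:

if __name__ == "__main__":
    node = PatnMatchingNode()
    # Bracket number 5, leading prefix 3, X_add 0 -> 5 - 3 + 0 = 2 groups
    print(node.extract_group_number("3_shirt(5)", X_add=0))  # prints 2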