|
|
import json
|
|
|
import os
|
|
|
|
|
|
def get_relative_position(bbox, img_width, img_height):
    """Describe where a bounding box sits inside an image.

    Args:
        bbox: [x, y, width, height] in percentage coordinates, as exported
            by Label Studio (values in the 0-100 range).
        img_width: Original image width in pixels.
        img_height: Original image height in pixels.

    Returns:
        A short Chinese phrase such as "顶部偏左区域" combining the vertical
        and horizontal thirds the box center falls into, or "未知位置" when
        either image dimension is zero.
    """
    if img_width == 0 or img_height == 0:
        return "未知位置"

    # Center of the box, still expressed in percent of the image size.
    cx = bbox[0] + bbox[2] / 2
    cy = bbox[1] + bbox[3] / 2

    # Vertical third: top / bottom / middle.
    vertical = "顶部" if cy < 33.3 else ("底部" if cy > 66.6 else "中部")
    # Horizontal third: left / right / center.
    horizontal = "偏左" if cx < 33.3 else ("偏右" if cx > 66.6 else "中心")

    return vertical + horizontal + "区域"
|
|
|
|
|
|
def _find_image_dimensions(results):
    """Return (width, height) from the first result that carries non-zero
    original image dimensions, or (0, 0) when none does."""
    for res in results:
        if res.get('original_width') and res.get('original_height'):
            return res['original_width'], res['original_height']
    return 0, 0


def generate_qwen_vl_data(label_studio_json_path, output_jsonl_path, image_base_dir):
    """Convert a Label Studio JSON export into Qwen-VL fine-tuning JSONL.

    Args:
        label_studio_json_path (str): Path to the Label Studio JSON export.
        output_jsonl_path (str): Path of the JSONL file to write.
        image_base_dir (str): Root directory of the image files; paths in the
            Label Studio JSON are re-rooted here by filename. For example, if
            the JSON contains "/data/upload/1/image.jpg" and the actual image
            lives at "your_project_data/images/image.jpg", pass
            "your_project_data/images".
    """
    with open(label_studio_json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    qwen_vl_samples = []

    for task in data:
        image_relative_path = task['data']['image']
        # Label Studio stores an upload path; keep only the filename and
        # re-root it under image_base_dir (forward slashes for portability).
        image_filename = os.path.basename(image_relative_path)
        image_absolute_path = os.path.join(image_base_dir, image_filename).replace('\\', '/')

        annotations = task.get('annotations', [])
        if not annotations:
            print(f"Warning: Task {task['id']} has no annotations. Skipping.")
            continue

        # Only the first annotation (one annotator) is considered.
        annotation_results = annotations[0].get('result', [])
        if not annotation_results:
            print(f"Warning: Task {task['id']} annotation has no results. Skipping.")
            continue

        component_type = "未知构件"
        crack_type = "未知裂缝类型"
        text_description = ""
        bbox_descriptions = []
        # Scan once for the image dimensions instead of re-checking on every
        # region result as before.
        original_width, original_height = _find_image_dimensions(annotation_results)

        for res in annotation_results:
            # Use .get(): some Label Studio result entries (e.g. relations)
            # lack 'from_name'/'type' and must not crash the conversion.
            from_name = res.get('from_name')
            res_type = res.get('type')
            value = res.get('value', {})

            if from_name == 'componentClassification' and res_type == 'choices':
                if value.get('choices'):
                    component_type = value['choices'][0]
            elif from_name == 'cracksClassification' and res_type == 'choices':
                if value.get('choices'):
                    crack_type = value['choices'][0]
            elif from_name == 'textTool' and res_type == 'textarea':
                if value.get('text'):
                    text_description = value['text'][0]
            elif res_type == 'rectanglelabels':
                # x/y/width/height are percentage coordinates.
                label = value['rectanglelabels'][0] if value.get('rectanglelabels') else "区域"
                position_desc = get_relative_position(
                    [value['x'], value['y'], value['width'], value['height']],
                    original_width, original_height)
                bbox_descriptions.append(f"一个 '{label}' 位于图像的 {position_desc}。")
            elif res_type == 'polygonlabels':
                points = value.get('points') or []
                if points:
                    # Approximate the polygon by its centroid (zero-size box).
                    avg_x = sum(p[0] for p in points) / len(points)
                    avg_y = sum(p[1] for p in points) / len(points)
                    label = value['polygonlabels'][0] if value.get('polygonlabels') else "区域"
                    position_desc = get_relative_position(
                        [avg_x, avg_y, 0, 0], original_width, original_height)
                    bbox_descriptions.append(f"一个 '{label}' 区域位于图像的 {position_desc}。")

        instruction = "请描述这张图片中的所有裂缝信息,包括构件、裂缝类型、位置和详细描述。"

        # Assemble the assistant response; only include sections that carry
        # real information (defaults are omitted).
        response_parts = []
        if component_type != "未知构件":
            response_parts.append(f"图片显示的是一个 '{component_type}'。")
        if crack_type != "未知裂缝类型":
            response_parts.append(f"主要裂缝类型是 '{crack_type}'。")
        if text_description:
            response_parts.append(f"详细情况描述为:'{text_description}'。")
        if bbox_descriptions:
            response_parts.append("图中发现以下缺陷区域:")
            for desc in bbox_descriptions:
                response_parts.append(f"- {desc}")

        if not response_parts:
            print(f"Warning: Task {task['id']} has no meaningful annotations to generate a response. Skipping.")
            continue

        response = " ".join(response_parts)

        qwen_vl_samples.append({
            "image": image_absolute_path,
            "conversations": [
                {"from": "user", "value": instruction},
                {"from": "assistant", "value": response}
            ]
        })

    with open(output_jsonl_path, 'w', encoding='utf-8') as f:
        for sample in qwen_vl_samples:
            f.write(json.dumps(sample, ensure_ascii=False) + '\n')

    print(f"成功生成 {len(qwen_vl_samples)} 条 Qwen-VL 微调数据到 {output_jsonl_path}")
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Demo paths; adjust to your environment.
    label_studio_json_file = 'annotations.json'
    output_jsonl_file = 'annotations.jsonl'
    image_base_directory = 'images'

    generate_qwen_vl_data(label_studio_json_file, output_jsonl_file, image_base_directory)

    # Preview the first generated sample, guarding against an empty output
    # file (every task skipped) which would make json.loads('') raise.
    with open(output_jsonl_file, 'r', encoding='utf-8') as f:
        first_line = f.readline()
    if first_line.strip():
        first_sample = json.loads(first_line)
        print("\n--- 第一个生成的样本示例 ---")
        print(json.dumps(first_sample, indent=2, ensure_ascii=False))
|
|
|
|
|
|
|