Update app.py

app.py CHANGED
@@ -92,6 +92,45 @@ custom_css = """
|
|
| 92 |
text-align: center;
|
| 93 |
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
|
| 94 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 95 |
"""
|
| 96 |
|
| 97 |
class ImageProcessor:
|
|
@@ -110,261 +149,30 @@ class ImageProcessor:
         self.model.to(device)
         self.model.eval()
 
-    def …
-        """…
-        if …
-        …
-        time_start = time.time()
-
-        # Run inference
-        with torch.no_grad():
-            x1, res, s1_sig, edg1, edg_s, s2, e2, s2_sig, e2_sig, s3, e3, s3_sig, e3_sig, s4, e4, s4_sig, e4_sig, s5, e5, s5_sig, e5_sig, sk1, sk1_sig, sk2, sk2_sig, sk3, sk3_sig, sk4, sk4_sig, sk5, sk5_sig = self.model(image_tensor)
-
-        time_end = time.time()
-        inference_time = time_end - time_start
-
-        # Process the output
-        res = res.sigmoid().data.cpu().numpy().squeeze()
-        res = (res - res.min()) / (res.max() - res.min() + 1e-8)
-
-        # Resize to the original resolution
-        h, w = original_image.shape[:2]
-        res_resized = cv2.resize(res, (w, h))
-
-        # Apply threshold
-        res_vis = (res_resized * 255).astype(np.uint8)
-
-        # Create the heatmap
-        heatmap = cv2.applyColorMap(res_vis, cv2.COLORMAP_JET)
-
-        # Binarize
-        _, binary_mask = cv2.threshold(res_vis, int(255 * threshold), 255, cv2.THRESH_BINARY)
-
-        # Overlay the result
-        alpha = 0.5
-        if len(original_image.shape) == 3 and original_image.shape[2] == 3:
-            original_bgr = cv2.cvtColor(original_image, cv2.COLOR_RGB2BGR)
-        else:
-            original_bgr = cv2.cvtColor(original_image, cv2.COLOR_GRAY2BGR)
-
-        overlayed = cv2.addWeighted(original_bgr, 1-alpha, heatmap, alpha, 0)
-        segmented = cv2.bitwise_and(original_bgr, original_bgr, mask=binary_mask)
-
-        # Convert back to RGB
-        overlayed_rgb = cv2.cvtColor(overlayed, cv2.COLOR_BGR2RGB)
-        segmented_rgb = cv2.cvtColor(segmented, cv2.COLOR_BGR2RGB)
-
-        # Compute statistics
-        stats = {
-            "处理分辨率": f"{w}x{h}",
-            "检测目标数量": str(len(cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0])),
-            "平均置信度": f"{np.mean(res_resized):.2%}",
-            "最大置信度": f"{np.max(res_resized):.2%}"
-        }
-
-        return original_image, res_vis, heatmap, overlayed_rgb, segmented_rgb, f"推理时间: {inference_time:.4f}秒", stats
-
-# Create the processor instance
-processor = ImageProcessor()
-
-def run_demo(input_image, threshold, enhance_contrast, denoise, show_contours):
-    """Enhanced main processing function"""
-    if input_image is None:
-        return [None] * 7 + ["请上传图片"]
-
-    # Process the image
-    results = processor.process_image(
-        input_image,
-        threshold=threshold/100.0,
-        enhance_contrast=enhance_contrast,
-        denoise=denoise
-    )
-
-    original, saliency_map, heatmap, overlayed, segmented, time_info, stats = results
-
-    # Add contour display
-    if show_contours and saliency_map is not None:
-        _, binary = cv2.threshold(saliency_map, 127, 255, cv2.THRESH_BINARY)
-        contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-        overlay_with_contours = overlayed.copy()
-        cv2.drawContours(overlay_with_contours, contours, -1, (0,255,0), 2)
-        overlayed = overlay_with_contours
-
-    # Generate the statistics HTML
-    stats_html = "<div class='statistics-container'>"
-    for key, value in stats.items():
-        stats_html += f"<div class='statistic-card'><h4>{key}</h4><p>{value}</p></div>"
-    stats_html += "</div>"
-
-    return original, saliency_map, heatmap, overlayed, segmented, time_info, stats_html
-
-# Build the Gradio interface
-with gr.Blocks(title="高级显著性目标检测系统", css=custom_css) as demo:
-    gr.Markdown(
-        """
-        # 🎯 智能显著性目标检测系统
-        ### 基于深度学习的图像显著性检测与分析工具
-        """
-    )
-
-    with gr.Tabs() as tabs:
-        with gr.TabItem("🔍 主要功能"):
-            with gr.Row():
-                with gr.Column(scale=1):
-                    input_image = gr.Image(
-                        label="输入图像",
-                        type="numpy",
-                        elem_classes="input-image"
-                    )
-                    with gr.Group(elem_classes="slider-component"):
-                        threshold_slider = gr.Slider(
-                            minimum=0,
-                            maximum=100,
-                            value=50,
-                            step=1,
-                            label="检测阈值",
-                            info="调整检测的灵敏度"
-                        )
-                        enhance_contrast = gr.Checkbox(
-                            label="增强对比度",
-                            value=False
-                        )
-                        denoise = gr.Checkbox(
-                            label="降噪处理",
-                            value=False
-                        )
-                        show_contours = gr.Checkbox(
-                            label="显示轮廓",
-                            value=True
-                        )
-
-                    submit_btn = gr.Button(
-                        "开始检测",
-                        variant="primary",
-                        elem_classes="custom-button"
-                    )
-
-                with gr.Column(scale=2):
-                    with gr.Tabs():
-                        with gr.TabItem("检测结果"):
-                            with gr.Row():
-                                original_output = gr.Image(
-                                    label="原始图像",
-                                    elem_classes="output-image"
-                                )
-                                saliency_output = gr.Image(
-                                    label="显著性图",
-                                    elem_classes="output-image"
-                                )
-                            with gr.Row():
-                                heatmap_output = gr.Image(
-                                    label="热力图分析",
-                                    elem_classes="output-image"
-                                )
-                                overlayed_output = gr.Image(
-                                    label="叠加效果",
-                                    elem_classes="output-image"
-                                )
-
-                            segmented_output = gr.Image(
-                                label="目标分割",
-                                elem_classes="output-image"
-                            )
-
-                            with gr.Group(elem_classes="info-box"):
-                                time_info = gr.Textbox(
-                                    label="处理时间",
-                                    show_label=True
-                                )
-                                stats_output = gr.HTML(
-                                    label="统计信息"
-                                )
-
-        with gr.TabItem("使用指南"):
-            gr.Markdown(
-                """
-                ## 📖 使用说明
-                1. **上传图片**: 点击左侧"输入图像"区域上传待分析的图片
-                2. **调整参数**: 使用阈值滑块调整检测的灵敏度
-                3. **开始检测**: 点击"开始检测"按钮进行分析
-                4. **查看结果**: 系统将显示多个维度的分析结果
-
-                ## 🎨 输出说明
-                - **显著性图**: 展示目标区域的重要性分布
-                - **热力图**: 使用色彩展示检测强度
-                - **叠加效果**: 将检测结果与原图叠加展示
-                - **目标分割**: 提取关键目标区域
-
-                ## 技术特点
-                - 实时处理:快速准确的目标检测
-                - 多维分析:提供多角度的可视化结果
-                - 交互式操作:支持参数实时调整
-                """
-            )
-
-        with gr.TabItem("关于项目"):
-            gr.Markdown(
-                """
-                ## 🌟 项目信息
-                - **版本**: 1.0.0
-                - **技术架构**: PyTorch + Gradio
-                - **模型**: CyueNet
-
-                ## 📊 性能指标
-                - 平均处理时间: <1s
-                - 准确率: >95%
-
-                ## 🔗 相关链接
-                - [项目主页](https://github.com/your-repo)
-                - [技术文档](https://your-docs)
-                - [问题反馈](https://github.com/your-repo/issues)
-                """
-            )
-
-    # Set up event handling
-    submit_btn.click(
-        fn=run_demo,
-        inputs=[
-            input_image,
-            threshold_slider,
-            enhance_contrast,
-            denoise,
-            show_contours
-        ],
-        outputs=[
-            original_output,
-            saliency_output,
-            heatmap_output,
-            overlayed_output,
-            segmented_output,
-            time_info,
-            stats_output
-        ]
-    )
-
-# Launch the app
-if __name__ == "__main__":
-    demo.launch(share=True)
+    def adjust_brightness_contrast(self, image, brightness=0, contrast=0):
+        """Adjust image brightness and contrast"""
+        if brightness != 0:
+            if brightness > 0:
+                shadow = brightness
+                highlight = 255
+            else:
+                shadow = 0
+                highlight = 255 + brightness
+            alpha_b = (highlight - shadow)/255
+            gamma_b = shadow
+            image = cv2.addWeighted(image, alpha_b, image, 0, gamma_b)
+        if contrast != 0:
+            f = 131*(contrast + 127)/(127*(131-contrast))
+            alpha_c = f
+            gamma_c = 127*(1-f)
+            image = cv2.addWeighted(image, alpha_c, image, 0, gamma_c)
+        return image
+
+    def apply_filters(self, image, filter_type):
+        """Apply an image filter effect"""
+        if filter_type == "锐化":
+            kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
+            return cv2.filter2D(image, -1, kernel)
+        elif filter_type == "柔化":
+            return cv2.GaussianBlur(image, (5,5), 0)
+        return image
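
The two helpers added above are plain OpenCV array transforms, so they can be sanity-checked without loading the CyueNet model or launching the Gradio UI. The sketch below is illustrative only and is not part of the commit: it applies the same brightness/contrast mapping (dst = alpha * src + gamma via cv2.addWeighted) and the same sharpening kernel to a synthetic gradient image; the gradient image and the brightness/contrast values (40 and 30) are assumptions made for the example.

# Illustrative sketch only -- not part of app.py. It mirrors the cv2 calls used
# by the new adjust_brightness_contrast / apply_filters helpers on a synthetic
# image; the brightness/contrast values below are arbitrary example inputs.
import cv2
import numpy as np

# Synthetic 256x256 horizontal gradient, expanded to 3 channels (BGR)
gradient = np.tile(np.arange(256, dtype=np.uint8), (256, 1))
image = cv2.cvtColor(gradient, cv2.COLOR_GRAY2BGR)

# Brightness step (brightness=40): dst = alpha_b * src + shadow,
# which remaps pixel values into the [40, 255] range
brightness = 40
shadow, highlight = brightness, 255
alpha_b = (highlight - shadow) / 255
brighter = cv2.addWeighted(image, alpha_b, image, 0, shadow)

# Contrast step (contrast=30): f > 1 stretches values away from the midpoint 127
contrast = 30
f = 131 * (contrast + 127) / (127 * (131 - contrast))
contrasted = cv2.addWeighted(brighter, f, brighter, 0, 127 * (1 - f))

# Sharpen step, same 3x3 kernel as apply_filters(filter_type="锐化")
kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
sharpened = cv2.filter2D(contrasted, -1, kernel)

print(image.mean(), brighter.mean(), contrasted.mean(), sharpened.mean())

Note that the contrast factor f = 131*(contrast + 127)/(127*(131 - contrast)) grows without bound as contrast approaches 131, so callers would presumably keep contrast within roughly -127 to 127.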