SmartHeal committed on
Commit 094b1a4 · verified · 1 Parent(s): 9eef931

Update src/ai_processor.py

Files changed (1)
  1. src/ai_processor.py +961 -205
src/ai_processor.py CHANGED
@@ -1,236 +1,992 @@
1
  import os
2
  import logging
3
  import cv2
4
  import numpy as np
5
  from PIL import Image
6
- import torch
7
- import json
8
- from datetime import datetime
9
- import tensorflow as tf
10
- from transformers import pipeline
11
- from ultralytics import YOLO
12
- from tensorflow.keras.models import load_model
13
- from langchain_community.document_loaders import PyPDFLoader
14
- from langchain.text_splitter import RecursiveCharacterTextSplitter
15
- from langchain_community.embeddings import HuggingFaceEmbeddings
16
- from langchain_community.vectorstores import FAISS
17
- from huggingface_hub import HfApi, HfFolder
18
- import spaces
19
-
20
- from src.config import Config
22
  class AIProcessor:
23
  def __init__(self):
24
- self.models_cache = {}
25
- self.knowledge_base_cache = {}
26
- self.config = Config()
27
- self.px_per_cm = 38
28
- self._initialize_models()
29
 
30
- def _initialize_models(self):
31
  try:
32
- HfFolder.save_token(self.config.HF_TOKEN)
33
 
34
- self.models_cache['yolo'] = YOLO(self.config.YOLO_MODEL_PATH)
35
- self.models_cache['segmentation'] = load_model(self.config.SEG_MODEL_PATH, compile=False)
36
 
37
- self.models_cache['medgemma_pipe'] = pipeline(
38
- "image-text-to-text",
39
- model="google/medgemma-4b-it",
40
- torch_dtype=torch.bfloat16,
41
- device_map="auto",
42
- token=self.config.HF_TOKEN
43
- )
44
 
45
- self.models_cache['embedding_model'] = HuggingFaceEmbeddings(
46
- model_name="sentence-transformers/all-MiniLM-L6-v2",
47
- model_kwargs={'device': 'cpu'}
48
- )
49
 
50
- self.models_cache['cls'] = pipeline(
51
- "image-classification",
52
- model="Hemg/Wound-classification",
53
- token=self.config.HF_TOKEN,
54
- device="cpu"
55
- )
56
 
57
- logging.info("βœ… All models loaded.")
58
- self._load_knowledge_base()
59
 
60
  except Exception as e:
61
- logging.error(f"Error initializing AI models: {e}")
 
62
 
63
- def _load_knowledge_base(self):
 
64
  try:
65
- docs = []
66
- for pdf in self.config.GUIDELINE_PDFS:
67
- if os.path.exists(pdf):
68
- loader = PyPDFLoader(pdf)
69
- docs.extend(loader.load())
70
- if docs:
71
- splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
72
- chunks = splitter.split_documents(docs)
73
- vectorstore = FAISS.from_documents(chunks, self.models_cache['embedding_model'])
74
- self.knowledge_base_cache['vectorstore'] = vectorstore
75
- logging.info("βœ… Knowledge base loaded.")
76
- else:
77
- self.knowledge_base_cache['vectorstore'] = None
78
  except Exception as e:
79
- logging.warning(f"Knowledge base error: {e}")
80
-
81
- def perform_visual_analysis(self, image_pil):
82
- image_cv = cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)
83
- results = self.models_cache['yolo'].predict(image_cv, verbose=False, device="cpu")
84
- if not results or not results[0].boxes:
85
- raise ValueError("No wound detected.")
86
-
87
- box = results[0].boxes[0].xyxy[0].cpu().numpy().astype(int)
88
- region_cv = image_cv[box[1]:box[3], box[0]:box[2]]
89
-
90
- input_size = self.models_cache['segmentation'].input_shape[1:3]
91
- resized = cv2.resize(region_cv, (input_size[1], input_size[0]))
92
- mask = self.models_cache['segmentation'].predict(np.expand_dims(resized / 255.0, 0), verbose=0)[0]
93
- mask_np = (mask[:, :, 0] > 0.5).astype(np.uint8)
94
-
95
- contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
96
- length = breadth = area = 0
97
- if contours:
98
- cnt = max(contours, key=cv2.contourArea)
99
- x, y, w, h = cv2.boundingRect(cnt)
100
- length = round(h / self.px_per_cm, 2)
101
- breadth = round(w / self.px_per_cm, 2)
102
- area = round(cv2.contourArea(cnt) / (self.px_per_cm ** 2), 2)
103
-
104
- wound_type = max(self.models_cache['cls'](Image.fromarray(cv2.cvtColor(region_cv, cv2.COLOR_BGR2RGB))), key=lambda x: x['score'])['label']
105
-
106
- return {
107
- 'wound_type': wound_type,
108
- 'length_cm': length,
109
- 'breadth_cm': breadth,
110
- 'surface_area_cm2': area
111
- }
112
-
113
- def query_guidelines(self, query: str):
114
- vector_store = self.knowledge_base_cache.get("vectorstore")
115
- if not vector_store:
116
- return "Knowledge base unavailable."
117
-
118
- retriever = vector_store.as_retriever(search_kwargs={"k": 10})
119
- docs = retriever.invoke(query)
120
- return "\n\n".join([
121
- f"Source: {doc.metadata.get('source', 'N/A')}, Page: {doc.metadata.get('page', 'N/A')}\nContent: {doc.page_content}"
122
- for doc in docs
123
- ])
124
-
125
- def generate_final_report(self, patient_info, visual_results, guideline_context, image_pil, max_new_tokens=2048):
126
- prompt = f"""
127
- 🩺 You are SmartHeal-AI, a world-class wound care AI specialist trained in clinical wound assessment and guideline-based treatment planning.
128
- Your task is to process the following structured inputs (patient data, wound measurements, clinical guidelines, and image) and perform **clinical reasoning and decision-making** to generate a complete wound care report.
129
- ---
130
- πŸ” **YOUR PROCESS β€” FOLLOW STRICTLY:**
131
- ### Step 1: Clinical Reasoning (Chain-of-Thought)
132
- Use the provided information to think step-by-step about:
133
- - Patient’s risk factors (e.g. diabetes, age, healing limitations)
134
- - Wound characteristics (size, tissue appearance, moisture, infection signs)
135
- - Visual clues from the image (location, granulation, maceration, inflammation, surrounding skin)
136
- - Clinical guidelines provided β€” selectively choose the ones most relevant to this case
137
- Do NOT list all guidelines verbatim. Use judgment: apply them where relevant. Explain why or why not.
138
- Also assess whether this wound appears:
139
- - Acute vs chronic
140
- - Surgical vs traumatic
141
- - Inflammatory vs proliferative healing phase
142
- ---
143
- ### Step 2: Structured Clinical Report
144
- Generate the following report sections using markdown and medical terminology:
145
- #### **1. Clinical Summary**
146
- - Describe wound appearance and tissue types (e.g., slough, necrotic, granulating, epithelializing)
147
- - Include size, wound bed condition, peri-wound skin, and signs of infection or biofilm
148
- - Mention inferred location (e.g., heel, forefoot) if image allows
149
- - Summarize patient's systemic risk profile
150
- #### **2. Medicinal & Dressing Recommendations**
151
- Based on your analysis:
152
- - Recommend specific **wound care dressings** (e.g., hydrocolloid, alginate, foam, antimicrobial silver, etc.) suitable to wound moisture level and infection risk
153
- - Propose **topical or systemic agents** ONLY if relevant β€” include name classes (e.g., antiseptic: povidone iodine, antibiotic ointments, enzymatic debriders)
154
- - Mention **techniques** (e.g., sharp debridement, NPWT, moisture balance, pressure offloading, dressing frequency)
155
- - Avoid repeating guidelines β€” **apply them**
156
- #### **3. Key Risk Factors**
157
- Explain how the patient’s condition (e.g., diabetic, poor circulation, advanced age, poor hygiene) may affect wound healing
158
- #### **4. Prognosis & Monitoring Advice**
159
- - Mention how often wound should be reassessed
160
- - Indicate signs to monitor for deterioration or improvement
161
- - Include when escalation to specialist is necessary
162
- #### **5. Disclaimer**
163
- This is an AI-generated summary based on available data. It is not a substitute for clinical evaluation by a wound care professional.
164
- **Note:** Every dressing change is a chance for wound reassessment. Always perform a thorough wound evaluation at each dressing change.
165
- ---
166
- 🧾 **INPUT DATA**
167
- **Patient Info:**
168
  {patient_info}
169
- **Wound Details:**
170
- - Type: {visual_results['wound_type']}
171
- - Size: {visual_results['length_cm']} Γ— {visual_results['breadth_cm']} cm
172
- - Area: {visual_results['surface_area_cm2']} cmΒ²
173
- **Clinical Guideline Evidence:**
174
- {guideline_context}
175
- You may now begin your analysis and generate the two-part report.
176
- """
177
 
178
- messages = [
179
- {
180
- "role": "system",
181
- "content": [{"type": "text", "text": "You are a world-class medical AI assistant..."}],
182
- },
183
- {
184
- "role": "user",
185
- "content": [
186
- {"type": "image", "image": image_pil},
187
- {"type": "text", "text": prompt},
188
- ]
189
- }
190
- ]
191
 
192
  try:
193
- output = self.models_cache['medgemma_pipe'](
194
- text=messages,
195
- max_new_tokens=max_new_tokens,
196
- do_sample=False,
197
  )
198
- return output[0]['generated_text'][-1].get('content', '').strip()
199
  except Exception as e:
200
- logging.error(f"MedGemma error: {e}", exc_info=True)
201
- return f"❌ Failed to generate report: {e}"
202
-
203
- def save_and_commit_image(self, image_pil):
204
- filename = f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.png"
205
- local_path = os.path.join(self.config.UPLOADS_DIR, filename)
206
- image_pil.convert("RGB").save(local_path)
207
- logging.info(f"Image saved locally: {local_path}")
208
-
209
- if self.config.HF_TOKEN and self.config.DATASET_ID:
210
- try:
211
- api = HfApi()
212
- api.upload_file(
213
- path_or_fileobj=local_path,
214
- path_in_repo=f"images/{filename}",
215
- repo_id=self.config.DATASET_ID,
216
- repo_type="dataset",
217
- commit_message=f"Upload wound image: {filename}"
218
- )
219
- logging.info("βœ… Image uploaded to HF dataset.")
220
- except Exception as e:
221
- logging.warning(f"Upload failed: {e}")
222
-
223
- @spaces.GPU(enable_queue=True, duration=120)
224
- def full_analysis_pipeline(self, image, questionnaire_data):
225
  try:
226
- self.save_and_commit_image(image)
227
- visual = self.perform_visual_analysis(image)
228
- patient_info = ", ".join([f"{k}: {v}" for k, v in questionnaire_data.items()])
229
- query = f"best practices for managing a {visual['wound_type']} with moisture level '{questionnaire_data.get('moisture')}' and signs of infection '{questionnaire_data.get('infection')}' in a patient who is diabetic '{questionnaire_data.get('diabetic')}'"
230
  guideline_context = self.query_guidelines(query)
231
 
232
- return self.generate_final_report(patient_info, visual, guideline_context, image)
233
 
 
234
  except Exception as e:
235
- logging.error(f"Pipeline error: {e}", exc_info=True)
236
- return f"❌ Error: {e}"
1
+ # src/ai_processor.py
2
+ # Verbose, instrumented version — preserves public class/function names
3
+ # Turn on deep logging: export LOGLEVEL=DEBUG SMARTHEAL_DEBUG=1
4
+
5
  import os
6
  import logging
7
+ from datetime import datetime
8
+ from typing import Optional, Dict, List, Tuple
9
+
10
+ # ---- Environment defaults (do NOT globally hint CUDA here) ----
11
+ os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
12
+ LOGLEVEL = os.getenv("LOGLEVEL", "INFO").upper()
13
+ SMARTHEAL_DEBUG = os.getenv("SMARTHEAL_DEBUG", "0") == "1"
14
+
15
  import cv2
16
  import numpy as np
17
  from PIL import Image
18
+ from PIL.ExifTags import TAGS
19
+
20
+ # --- Logging config ---
21
+ logging.basicConfig(
22
+ level=getattr(logging, LOGLEVEL, logging.INFO),
23
+ format="%(asctime)s - %(levelname)s - %(message)s",
24
+ )
25
+
26
+ def _log_kv(prefix: str, kv: Dict):
27
+ logging.debug(prefix + " | " + " | ".join(f"{k}={v}" for k, v in kv.items()))
28
+
29
+ # --- Spaces GPU decorator (REQUIRED) ---
30
+ from spaces import GPU as _SPACES_GPU
31
+
32
+ @_SPACES_GPU(enable_queue=True)
33
+ def smartheal_gpu_stub(ping: int = 0) -> str:
34
+ return "ready"
35
+
36
+ # ---- Paths / constants ----
37
+ UPLOADS_DIR = "uploads"
38
+ os.makedirs(UPLOADS_DIR, exist_ok=True)
39
+
40
+ HF_TOKEN = os.getenv("HF_TOKEN", None)
41
+ YOLO_MODEL_PATH = "src/best.pt"
42
+ SEG_MODEL_PATH = "src/segmentation_model.h5" # optional
43
+ GUIDELINE_PDFS = ["src/eHealth in Wound Care.pdf", "src/IWGDF Guideline.pdf", "src/evaluation.pdf"]
44
+ DATASET_ID = "SmartHeal/wound-image-uploads"
45
+ DEFAULT_PX_PER_CM = 38.0
46
+ PX_PER_CM_MIN, PX_PER_CM_MAX = 5.0, 1200.0
47
+
48
+ # Segmentation preprocessing knobs
49
+ SEG_EXPECTS_RGB = os.getenv("SEG_EXPECTS_RGB", "1") == "1" # most TF models trained on RGB
50
+ SEG_NORM = os.getenv("SEG_NORM", "0to1") # "0to1" | "imagenet"
51
+ SEG_THRESH = float(os.getenv("SEG_THRESH", "0.5"))
52
+
53
+ models_cache: Dict[str, object] = {}
54
+ knowledge_base_cache: Dict[str, object] = {}
55
+
56
+ # ---------- Utilities to prevent CUDA in main process ----------
57
+ from contextlib import contextmanager
58
+
59
+ @contextmanager
60
+ def _no_cuda_env():
61
+ """
62
+ Mask GPUs so any library imported/constructed in the main process
63
+ cannot see CUDA (required for Spaces Stateless GPU).
64
+ """
65
+ prev = os.environ.get("CUDA_VISIBLE_DEVICES")
66
+ os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
67
+ try:
68
+ yield
69
+ finally:
70
+ if prev is None:
71
+ os.environ.pop("CUDA_VISIBLE_DEVICES", None)
72
+ else:
73
+ os.environ["CUDA_VISIBLE_DEVICES"] = prev
74
+
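
A minimal sketch of what the mask guarantees (illustrative; only `_no_cuda_env` from above is assumed):

```python
# Inside the block CUDA devices are hidden; the previous value is restored on exit.
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0"                 # pretend a GPU was visible
with _no_cuda_env():
    assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1"    # masked inside the block
assert os.environ["CUDA_VISIBLE_DEVICES"] == "0"         # restored afterwards
```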
75
+ # ---------- Lazy imports (wrapped where needed) ----------
76
+ def _import_ultralytics():
77
+ # Prevent Ultralytics from probing CUDA on import
78
+ with _no_cuda_env():
79
+ from ultralytics import YOLO
80
+ return YOLO
81
+
82
+ def _import_tf_loader():
83
+ import tensorflow as tf
84
+ tf.config.set_visible_devices([], "GPU")
85
+ from tensorflow.keras.models import load_model
86
+ return load_model
87
+
88
+ def _import_hf_cls():
89
+ from transformers import pipeline
90
+ return pipeline
91
+
92
+ def _import_embeddings():
93
+ from langchain_community.embeddings import HuggingFaceEmbeddings
94
+ return HuggingFaceEmbeddings
95
+
96
+ def _import_langchain_pdf():
97
+ from langchain_community.document_loaders import PyPDFLoader
98
+ return PyPDFLoader
99
+
100
+ def _import_langchain_faiss():
101
+ from langchain_community.vectorstores import FAISS
102
+ return FAISS
103
+
104
+ def _import_hf_hub():
105
+ from huggingface_hub import HfApi, HfFolder
106
+ return HfApi, HfFolder
107
+
108
+ # ---------- SmartHeal prompts (system + user prefix) ----------
109
+ SMARTHEAL_SYSTEM_PROMPT = """\
110
+ You are SmartHeal Clinical Assistant, a wound-care decision-support system.
111
+ You analyze wound photographs and brief patient context to produce careful,
112
+ specific, guideline-informed recommendations WITHOUT diagnosing. You always:
113
+ - Use the measurements calculated by the vision pipeline as ground truth.
114
+ - Prefer concise, actionable steps tailored to exudate level, infection risk, and pain.
115
+ - Flag uncertainties and red flags that need escalation to a clinician.
116
+ - Avoid contraindicated advice; do not infer unseen comorbidities.
117
+ - Keep under 300 words and use the requested headings exactly.
118
+ - Tone: professional, clear, and conservative; no definitive medical claims.
119
+ - Safety: remind the user to seek clinician review for changes or red flags.
120
+ """
121
+
122
+ SMARTHEAL_USER_PREFIX = """\
123
+ Patient: {patient_info}
124
+ Visual findings: type={wound_type}, size={length_cm}x{breadth_cm} cm, area={area_cm2} cm^2,
125
+ detection_conf={det_conf:.2f}, calibration={px_per_cm} px/cm.
126
+
127
+ Guideline context (snippets you can draw principles from; do not quote at length):
128
+ {guideline_context}
129
+
130
+ Write a structured answer with these headings exactly:
131
+ 1. Clinical Summary (max 4 bullet points)
132
+ 2. Likely Stage/Type (if uncertain, say 'uncertain')
133
+ 3. Treatment Plan (specific dressing choices and frequency based on exudate/infection risk)
134
+ 4. Red Flags (what to escalate and when)
135
+ 5. Follow-up Cadence (days)
136
+ 6. Notes (assumptions/uncertainties)
137
+
138
+ Keep to 220–300 words. Do NOT provide diagnosis. Avoid contraindicated advice.
139
+ """
140
+
141
+ # ---------- VLM (MedGemma replaced with Qwen2-VL) ----------
142
+ def _vlm_infer_gpu(messages, model_id: str, max_new_tokens: int, token: Optional[str]):
143
+ """
144
+ Runs entirely inside a Spaces GPU worker. It's the ONLY place we allow CUDA init.
145
+ """
146
+ from transformers import pipeline
147
+ import torch # Ensure torch is imported here
148
+ pipe = pipeline(
149
+ task="image-text-to-text",
150
+ model=model_id,
151
+ torch_dtype=torch.bfloat16, # Use torch_dtype from the working example
152
+ device_map="auto", # CUDA init happens here, safely in GPU worker
153
+ token=token,
154
+ trust_remote_code=True,
155
+ model_kwargs={"low_cpu_mem_usage": True},
156
+ )
157
+ out = pipe(text=messages, max_new_tokens=max_new_tokens, do_sample=False, temperature=0.2)
158
+ try:
159
+ txt = out[0]["generated_text"][-1].get("content", "")
160
+ except Exception:
161
+ txt = out[0].get("generated_text", "")
162
+ return (txt or "").strip() or "⚠️ Empty response"
163
+
164
+ def generate_medgemma_report( # kept name so callers don't change
165
+ patient_info: str,
166
+ visual_results: Dict,
167
+ guideline_context: str,
168
+ image_pil: Image.Image,
169
+ max_new_tokens: Optional[int] = None,
170
+ ) -> str:
171
+ """
172
+ MedGemma replacement using Qwen/Qwen2-VL-2B-Instruct via image-text-to-text.
173
+ Loads & runs ONLY inside a GPU worker to satisfy Stateless GPU constraints.
174
+ """
175
+ if os.getenv("SMARTHEAL_ENABLE_VLM", "1") != "1":
176
+ return "⚠️ VLM disabled"
177
+
178
+ model_id = os.getenv("SMARTHEAL_VLM_MODEL", "Qwen/Qwen2-VL-2B-Instruct")
179
+ max_new_tokens = max_new_tokens or int(os.getenv("SMARTHEAL_VLM_MAX_TOKENS", "600"))
180
+
181
+ uprompt = SMARTHEAL_USER_PREFIX.format(
182
+ patient_info=patient_info,
183
+ wound_type=visual_results.get("wound_type", "Unknown"),
184
+ length_cm=visual_results.get("length_cm", 0),
185
+ breadth_cm=visual_results.get("breadth_cm", 0),
186
+ area_cm2=visual_results.get("surface_area_cm2", 0),
187
+ det_conf=float(visual_results.get("detection_confidence", 0.0)),
188
+ px_per_cm=visual_results.get("px_per_cm", "?"),
189
+ guideline_context=(guideline_context or "")[:900],
190
+ )
191
+
192
+ messages = [
193
+ {"role": "system", "content": [{"type": "text", "text": SMARTHEAL_SYSTEM_PROMPT}]},
194
+ {"role": "user", "content": [
195
+ {"type": "image", "image": image_pil},
196
+ {"type": "text", "text": uprompt},
197
+ ]},
198
+ ]
199
+
200
+ try:
201
+ # IMPORTANT: do not import transformers or touch CUDA here. Only call the GPU worker.
202
+ return _vlm_infer_gpu(messages, model_id, max_new_tokens, HF_TOKEN)
203
+ except Exception as e:
204
+ logging.error(f"VLM call failed: {e}")
205
+ return "⚠️ VLM error"
206
+
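
A hedged usage sketch for the report generator above; the image path and patient string are hypothetical, and the call assumes the Spaces GPU worker (and `HF_TOKEN`, if the model requires it) is available:

```python
from PIL import Image

demo_visual = {
    "wound_type": "Pressure ulcer",
    "length_cm": 2.4, "breadth_cm": 1.1, "surface_area_cm2": 2.0,
    "detection_confidence": 0.87, "px_per_cm": 38.0,
}
report = generate_medgemma_report(
    patient_info="Age: 62, Diabetic: yes, Moisture: moderate",   # hypothetical
    visual_results=demo_visual,
    guideline_context="Moisture balance; offloading; monitor for infection.",
    image_pil=Image.open("sample_wound.png"),                    # hypothetical file
)
print(report)
```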
207
+ # ---------- Initialize CPU models ----------
208
+ def load_yolo_model():
209
+ YOLO = _import_ultralytics()
210
+ # Construct model with CUDA masked to avoid auto-selecting cuda:0
211
+ with _no_cuda_env():
212
+ model = YOLO(YOLO_MODEL_PATH)
213
+ return model
214
+ def load_segmentation_model():
215
+ import tensorflow as tf
216
+ load_model = _import_tf_loader()
217
+ return load_model(SEG_MODEL_PATH, compile=False, custom_objects={'InputLayer': tf.keras.layers.InputLayer})
218
+
219
+ def load_classification_pipeline():
220
+ pipe = _import_hf_cls()
221
+ return pipe("image-classification", model="Hemg/Wound-classification", token=HF_TOKEN, device="cpu")
222
+
223
+ def load_embedding_model():
224
+ Emb = _import_embeddings()
225
+ return Emb(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})
226
+
227
+ def initialize_cpu_models() -> None:
228
+ if HF_TOKEN:
229
+ try:
230
+ HfApi, HfFolder = _import_hf_hub()
231
+ HfFolder.save_token(HF_TOKEN)
232
+ logging.info("✅ HF token set")
233
+ except Exception as e:
234
+ logging.warning(f"HF token save failed: {e}")
235
+
236
+ if "det" not in models_cache:
237
+ try:
238
+ models_cache["det"] = load_yolo_model()
239
+ logging.info("✅ YOLO loaded (CPU; CUDA masked in main)")
240
+ except Exception as e:
241
+ logging.error(f"YOLO load failed: {e}")
242
+
243
+ if "seg" not in models_cache:
244
+ try:
245
+ if os.path.exists(SEG_MODEL_PATH):
246
+ models_cache["seg"] = load_segmentation_model()
247
+ m = models_cache["seg"]
248
+ ishape = getattr(m, "input_shape", None)
249
+ oshape = getattr(m, "output_shape", None)
250
+ logging.info(f"✅ Segmentation model loaded (CPU) | input_shape={ishape} output_shape={oshape}")
251
+ else:
252
+ models_cache["seg"] = None
253
+ logging.warning("Segmentation model file missing; skipping.")
254
+ except Exception as e:
255
+ models_cache["seg"] = None
256
+ logging.warning(f"Segmentation unavailable: {e}")
257
+
258
+ if "cls" not in models_cache:
259
+ try:
260
+ models_cache["cls"] = load_classification_pipeline()
261
+ logging.info("✅ Classifier loaded (CPU)")
262
+ except Exception as e:
263
+ models_cache["cls"] = None
264
+ logging.warning(f"Classifier unavailable: {e}")
265
+
266
+ if "embedding_model" not in models_cache:
267
+ try:
268
+ models_cache["embedding_model"] = load_embedding_model()
269
+ logging.info("✅ Embeddings loaded (CPU)")
270
+ except Exception as e:
271
+ models_cache["embedding_model"] = None
272
+ logging.warning(f"Embeddings unavailable: {e}")
273
+
274
+ def setup_knowledge_base() -> None:
275
+ if "vector_store" in knowledge_base_cache:
276
+ return
277
+ docs: List = []
278
+ try:
279
+ PyPDFLoader = _import_langchain_pdf()
280
+ for pdf in GUIDELINE_PDFS:
281
+ if os.path.exists(pdf):
282
+ try:
283
+ docs.extend(PyPDFLoader(pdf).load())
284
+ logging.info(f"Loaded PDF: {pdf}")
285
+ except Exception as e:
286
+ logging.warning(f"PDF load failed ({pdf}): {e}")
287
+ except Exception as e:
288
+ logging.warning(f"LangChain PDF loader unavailable: {e}")
289
+
290
+ if docs and models_cache.get("embedding_model"):
291
+ try:
292
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
293
+ FAISS = _import_langchain_faiss()
294
+ chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
295
+ knowledge_base_cache["vector_store"] = FAISS.from_documents(chunks, models_cache["embedding_model"])
296
+ logging.info(f"✅ Knowledge base ready ({len(chunks)} chunks)")
297
+ except Exception as e:
298
+ knowledge_base_cache["vector_store"] = None
299
+ logging.warning(f"KB build failed: {e}")
300
+ else:
301
+ knowledge_base_cache["vector_store"] = None
302
+ logging.warning("KB disabled (no docs or embeddings).")
303
+
304
+ initialize_cpu_models()
305
+ setup_knowledge_base()
306
+
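
Once the module-level initialization above has run, the FAISS store (when the guideline PDFs and embeddings are present) can be probed directly; a small sketch:

```python
vs = knowledge_base_cache.get("vector_store")
if vs is not None:
    # retrieve a few guideline snippets for an ad-hoc query
    for doc in vs.as_retriever(search_kwargs={"k": 3}).invoke("diabetic foot ulcer dressing"):
        print(doc.metadata.get("source", "N/A"), "|", doc.page_content[:80])
else:
    print("Knowledge base not available")
```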
307
+ # ---------- Calibration helpers ----------
308
+ def _exif_to_dict(pil_img: Image.Image) -> Dict[str, object]:
309
+ out = {}
310
+ try:
311
+ exif = pil_img.getexif()
312
+ if not exif:
313
+ return out
314
+ for k, v in exif.items():
315
+ tag = TAGS.get(k, k)
316
+ out[tag] = v
317
+ except Exception:
318
+ pass
319
+ return out
320
+
321
+ def _to_float(val) -> Optional[float]:
322
+ try:
323
+ if val is None:
324
+ return None
325
+ if isinstance(val, tuple) and len(val) == 2:
326
+ num, den = float(val[0]), float(val[1]) if float(val[1]) != 0 else 1.0
327
+ return num / den
328
+ return float(val)
329
+ except Exception:
330
+ return None
331
+
332
+ def _estimate_sensor_width_mm(f_mm: Optional[float], f35: Optional[float]) -> Optional[float]:
333
+ if f_mm and f35 and f35 > 0:
334
+ return 36.0 * f_mm / f35
335
+ return None
336
+
337
+ def estimate_px_per_cm_from_exif(pil_img: Image.Image, default_px_per_cm: float = DEFAULT_PX_PER_CM) -> Tuple[float, Dict]:
338
+ meta = {"used": "default", "f_mm": None, "f35": None, "sensor_w_mm": None, "distance_m": None}
339
+ try:
340
+ exif = _exif_to_dict(pil_img)
341
+ f_mm = _to_float(exif.get("FocalLength"))
342
+ f35 = _to_float(exif.get("FocalLengthIn35mmFilm") or exif.get("FocalLengthIn35mm"))
343
+ subj_dist_m = _to_float(exif.get("SubjectDistance"))
344
+ sensor_w_mm = _estimate_sensor_width_mm(f_mm, f35)
345
+ meta.update({"f_mm": f_mm, "f35": f35, "sensor_w_mm": sensor_w_mm, "distance_m": subj_dist_m})
346
+
347
+ if f_mm and sensor_w_mm and subj_dist_m and subj_dist_m > 0:
348
+ w_px = pil_img.width
349
+ field_w_mm = sensor_w_mm * (subj_dist_m * 1000.0) / f_mm
350
+ field_w_cm = field_w_mm / 10.0
351
+ px_per_cm = w_px / max(field_w_cm, 1e-6)
352
+ px_per_cm = float(np.clip(px_per_cm, PX_PER_CM_MIN, PX_PER_CM_MAX))
353
+ meta["used"] = "exif"
354
+ return px_per_cm, meta
355
+ return float(default_px_per_cm), meta
356
+ except Exception:
357
+ return float(default_px_per_cm), meta
358
+
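
A worked example of the pinhole arithmetic above, with made-up EXIF values (focal length 4.25 mm, 35 mm-equivalent 26 mm, subject distance 0.30 m, image width 3024 px):

```python
f_mm, f35, dist_m, width_px = 4.25, 26.0, 0.30, 3024           # illustrative values

sensor_w_mm = 36.0 * f_mm / f35                                 # ≈ 5.88 mm
field_w_cm = sensor_w_mm * (dist_m * 1000.0) / f_mm / 10.0      # ≈ 41.5 cm across the frame
px_per_cm = width_px / field_w_cm                               # ≈ 72.8 px/cm
print(round(px_per_cm, 1))   # well inside [PX_PER_CM_MIN, PX_PER_CM_MAX]
```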
359
+ # ---------- Segmentation helpers ----------
360
+ def _imagenet_norm(arr: np.ndarray) -> np.ndarray:
361
+ mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
362
+ std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
363
+ return (arr.astype(np.float32) - mean) / std
364
+
365
+ def _preprocess_for_seg(bgr_roi: np.ndarray, target_hw: Tuple[int, int]) -> np.ndarray:
366
+ H, W = target_hw
367
+ resized = cv2.resize(bgr_roi, (W, H), interpolation=cv2.INTER_LINEAR)
368
+ if SEG_EXPECTS_RGB:
369
+ resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
370
+ if SEG_NORM.lower() == "imagenet":
371
+ x = _imagenet_norm(resized)
372
+ else:
373
+ x = resized.astype(np.float32) / 255.0
374
+ x = np.expand_dims(x, axis=0) # (1,H,W,3)
375
+ return x
376
+
377
+ def _to_prob(pred: np.ndarray) -> np.ndarray:
378
+ p = np.squeeze(pred)
379
+ pmin, pmax = float(p.min()), float(p.max())
380
+ if pmax > 1.0 or pmin < 0.0:
381
+ p = 1.0 / (1.0 + np.exp(-p))
382
+ return p.astype(np.float32)
383
+
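
A quick shape/range check of the segmentation preprocessing above on a synthetic ROI (no model involved; the 224x224 target size is arbitrary here):

```python
dummy_roi = np.random.randint(0, 255, (120, 160, 3), dtype=np.uint8)   # fake BGR crop
x = _preprocess_for_seg(dummy_roi, (224, 224))
print(x.shape)                           # (1, 224, 224, 3)
print(float(x.min()), float(x.max()))    # within [0, 1] for SEG_NORM="0to1"
```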
384
+ # ---- Adaptive threshold + GrabCut grow ----
385
+ def _adaptive_prob_threshold(p: np.ndarray) -> float:
386
+ """
387
+ Choose a threshold that avoids tiny blobs while not swallowing skin.
388
+ Try Otsu and the 90th percentile, clamp to [0.25, 0.65], pick by area heuristic.
389
+ """
390
+ p01 = np.clip(p.astype(np.float32), 0, 1)
391
+ p255 = (p01 * 255).astype(np.uint8)
392
+
393
+ ret_otsu, _ = cv2.threshold(p255, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
394
+ thr_otsu = float(np.clip(ret_otsu / 255.0, 0.25, 0.65))
395
+ thr_pctl = float(np.clip(np.percentile(p01, 90), 0.25, 0.65))
396
+
397
+ def area_frac(thr: float) -> float:
398
+ return float((p01 >= thr).sum()) / float(p01.size)
399
+
400
+ af_otsu = area_frac(thr_otsu)
401
+ af_pctl = area_frac(thr_pctl)
402
+
403
+ def score(af: float) -> float:
404
+ target_low, target_high = 0.03, 0.10
405
+ if af < target_low: return abs(af - target_low) * 3.0
406
+ if af > target_high: return abs(af - target_high) * 1.5
407
+ return 0.0
408
+
409
+ return thr_otsu if score(af_otsu) <= score(af_pctl) else thr_pctl
410
+
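
A synthetic check of the adaptive threshold: a probability map whose bright blob covers about 4% of the pixels should yield a threshold inside the clamped [0.25, 0.65] band:

```python
p = np.zeros((100, 100), np.float32)
p[40:60, 40:60] = 0.9                      # ~4% confidently "wound"
thr = _adaptive_prob_threshold(p)
print(thr, 0.25 <= thr <= 0.65)            # e.g. ~0.45, True
```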
411
+ def _grabcut_refine(bgr: np.ndarray, seed01: np.ndarray, iters: int = 3) -> np.ndarray:
412
+ """Grow from a confident core into low-contrast margins."""
413
+ h, w = bgr.shape[:2]
414
+ gc = np.full((h, w), cv2.GC_PR_BGD, np.uint8)
415
+ k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
416
+ seed_dil = cv2.dilate(seed01, k, iterations=1)
417
+ gc[seed_dil.astype(bool)] = cv2.GC_PR_FGD # dilated ring: probable foreground
418
+ gc[seed01.astype(bool)] = cv2.GC_FGD # confident core: definite foreground
419
+ gc[0, :], gc[-1, :], gc[:, 0], gc[:, -1] = cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD
420
+ bgdModel = np.zeros((1, 65), np.float64)
421
+ fgdModel = np.zeros((1, 65), np.float64)
422
+ cv2.grabCut(bgr, gc, None, bgdModel, fgdModel, iters, cv2.GC_INIT_WITH_MASK)
423
+ return np.where((gc == cv2.GC_FGD) | (gc == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)
424
+
425
+ def _fill_holes(mask01: np.ndarray) -> np.ndarray:
426
+ h, w = mask01.shape[:2]
427
+ ff = np.zeros((h + 2, w + 2), np.uint8)
428
+ m = (mask01 * 255).astype(np.uint8).copy()
429
+ cv2.floodFill(m, ff, (0, 0), 255)
430
+ m_inv = cv2.bitwise_not(m)
431
+ out = ((mask01 * 255) | m_inv) // 255
432
+ return out.astype(np.uint8)
433
+
434
+ def _clean_mask(mask01: np.ndarray) -> np.ndarray:
435
+ """Open → Close → Fill holes → Largest component (no dilation)."""
436
+ mask01 = (mask01 > 0).astype(np.uint8)
437
+ k3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
438
+ k5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
439
+ mask01 = cv2.morphologyEx(mask01, cv2.MORPH_OPEN, k3, iterations=1)
440
+ mask01 = cv2.morphologyEx(mask01, cv2.MORPH_CLOSE, k5, iterations=1)
441
+ mask01 = _fill_holes(mask01)
442
+ # Keep largest component only
443
+ num, labels, stats, _ = cv2.connectedComponentsWithStats(mask01, 8)
444
+ if num > 1:
445
+ areas = stats[1:, cv2.CC_STAT_AREA]
446
+ if areas.size:
447
+ largest_idx = 1 + int(np.argmax(areas))
448
+ mask01 = (labels == largest_idx).astype(np.uint8)
449
+ return (mask01 > 0).astype(np.uint8)
450
+
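
And a small check of the cleanup chain: a hole inside the main blob is filled, an isolated speckle is dropped, and only the largest component remains:

```python
m = np.zeros((100, 100), np.uint8)
m[20:60, 20:60] = 1        # main 40x40 blob
m[35:38, 35:38] = 0        # small hole -> filled
m[80, 80] = 1              # isolated speckle -> removed
cleaned = _clean_mask(m)
print(int(cleaned.sum()))  # close to 1600 (the main blob only)
```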
451
+ # Global last debug dict (per-process)
452
+ _last_seg_debug: Dict[str, object] = {}
453
+
454
+ def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndarray, Dict[str, object]]:
455
+ """
456
+ TF model → adaptive threshold on prob → GrabCut grow → cleanup.
457
+ Fallback: KMeans-Lab.
458
+ Returns (mask_uint8_0_255, debug_dict)
459
+ """
460
+ debug = {"used": None, "reason": None, "positive_fraction": 0.0,
461
+ "thr": None, "heatmap_path": None, "roi_seen_by_model": None}
462
 
463
+ seg_model = models_cache.get("seg", None)
464
+
465
+ # --- Model path ---
466
+ if seg_model is not None:
467
+ try:
468
+ ishape = getattr(seg_model, "input_shape", None)
469
+ if not ishape or len(ishape) < 4:
470
+ raise ValueError(f"Bad seg input_shape: {ishape}")
471
+ th, tw = int(ishape[1]), int(ishape[2])
472
+
473
+ x = _preprocess_for_seg(image_bgr, (th, tw))
474
+ roi_seen_path = None
475
+ if SMARTHEAL_DEBUG:
476
+ roi_seen_path = os.path.join(out_dir, f"roi_for_seg_{ts}.png")
477
+ cv2.imwrite(roi_seen_path, image_bgr)
478
+
479
+ pred = seg_model.predict(x, verbose=0)
480
+ if isinstance(pred, (list, tuple)): pred = pred[0]
481
+ p = _to_prob(pred)
482
+ p = cv2.resize(p, (image_bgr.shape[1], image_bgr.shape[0]), interpolation=cv2.INTER_LINEAR)
483
+
484
+ heatmap_path = None
485
+ if SMARTHEAL_DEBUG:
486
+ hm = (np.clip(p, 0, 1) * 255).astype(np.uint8)
487
+ heat = cv2.applyColorMap(hm, cv2.COLORMAP_JET)
488
+ heatmap_path = os.path.join(out_dir, f"seg_pred_heatmap_{ts}.png")
489
+ cv2.imwrite(heatmap_path, heat)
490
+
491
+ thr = _adaptive_prob_threshold(p)
492
+ core01 = (p >= thr).astype(np.uint8)
493
+ core_frac = float(core01.sum()) / float(core01.size)
494
+
495
+ if core_frac < 0.005:
496
+ thr2 = max(thr - 0.10, 0.15)
497
+ core01 = (p >= thr2).astype(np.uint8)
498
+ thr = thr2
499
+ core_frac = float(core01.sum()) / float(core01.size)
500
+
501
+ if core01.any():
502
+ gc01 = _grabcut_refine(image_bgr, core01, iters=3)
503
+ mask01 = _clean_mask(gc01)
504
+ else:
505
+ mask01 = np.zeros(core01.shape, np.uint8)
506
+
507
+ pos_frac = float(mask01.sum()) / float(mask01.size)
508
+ logging.info(f"SegModel USED | thr={float(thr):.2f} core_frac={core_frac:.4f} final_frac={pos_frac:.4f}")
509
+
510
+ debug.update({
511
+ "used": "tf_model",
512
+ "reason": "ok",
513
+ "positive_fraction": pos_frac,
514
+ "thr": float(thr),
515
+ "heatmap_path": heatmap_path,
516
+ "roi_seen_by_model": roi_seen_path
517
+ })
518
+ return (mask01 * 255).astype(np.uint8), debug
519
+
520
+ except Exception as e:
521
+ logging.warning(f"⚠️ Segmentation model failed → fallback. Reason: {e}")
522
+ debug.update({"used": "fallback_kmeans", "reason": f"model_failed: {e}"})
523
+
524
+ # --- Fallback: KMeans in Lab (reddest cluster as wound) ---
525
+ Z = image_bgr.reshape((-1, 3)).astype(np.float32)
526
+ criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
527
+ _, labels, centers = cv2.kmeans(Z, 2, None, criteria, 5, cv2.KMEANS_PP_CENTERS)
528
+ centers_u8 = centers.astype(np.uint8).reshape(1, 2, 3)
529
+ centers_lab = cv2.cvtColor(centers_u8, cv2.COLOR_BGR2LAB)[0]
530
+ wound_idx = int(np.argmax(centers_lab[:, 1])) # maximize a* (red)
531
+ mask01 = (labels.reshape(image_bgr.shape[:2]) == wound_idx).astype(np.uint8)
532
+ mask01 = _clean_mask(mask01)
533
+
534
+ pos_frac = float(mask01.sum()) / float(mask01.size)
535
+ logging.info(f"KMeans USED | final_frac={pos_frac:.4f}")
536
+
537
+ debug.update({
538
+ "used": "fallback_kmeans",
539
+ "reason": debug.get("reason") or "no_model",
540
+ "positive_fraction": pos_frac,
541
+ "thr": None
542
+ })
543
+ return (mask01 * 255).astype(np.uint8), debug
544
+
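
A usage sketch for `segment_wound`: with no TF model in the cache the KMeans fallback runs, picking the reddest Lab cluster on a synthetic BGR image (values are illustrative):

```python
models_cache["seg"] = None                                  # force the fallback path
img = np.full((120, 120, 3), (180, 160, 150), np.uint8)     # skin-ish background (BGR)
cv2.circle(img, (60, 60), 25, (40, 40, 200), -1)            # reddish "wound" patch
mask, dbg = segment_wound(img, ts="demo", out_dir=UPLOADS_DIR)
print(dbg["used"], round(dbg["positive_fraction"], 3))      # fallback_kmeans, ~0.14
```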
545
+ # ---------- Measurement + overlay helpers ----------
546
+ def largest_component_mask(binary01: np.ndarray, min_area_px: int = 50) -> np.ndarray:
547
+ num, labels, stats, _ = cv2.connectedComponentsWithStats(binary01.astype(np.uint8), connectivity=8)
548
+ if num <= 1:
549
+ return binary01.astype(np.uint8)
550
+ areas = stats[1:, cv2.CC_STAT_AREA]
551
+ if areas.size == 0 or areas.max() < min_area_px:
552
+ return binary01.astype(np.uint8)
553
+ largest_idx = 1 + int(np.argmax(areas))
554
+ return (labels == largest_idx).astype(np.uint8)
555
+
556
+ def measure_min_area_rect(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, float, Tuple]:
557
+ contours, _ = cv2.findContours(mask01.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
558
+ if not contours:
559
+ return 0.0, 0.0, (None, None)
560
+ cnt = max(contours, key=cv2.contourArea)
561
+ rect = cv2.minAreaRect(cnt)
562
+ (w_px, h_px) = rect[1]
563
+ length_px, breadth_px = (max(w_px, h_px), min(w_px, h_px))
564
+ length_cm = round(length_px / max(px_per_cm, 1e-6), 2)
565
+ breadth_cm = round(breadth_px / max(px_per_cm, 1e-6), 2)
566
+ box = cv2.boxPoints(rect).astype(int)
567
+ return length_cm, breadth_cm, (box, rect[0])
568
+
569
+ def area_cm2_from_contour(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, Optional[np.ndarray]]:
570
+ """Area from largest polygon (sub-pixel); returns (area_cm2, contour)."""
571
+ m = (mask01 > 0).astype(np.uint8)
572
+ contours, _ = cv2.findContours(m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
573
+ if not contours:
574
+ return 0.0, None
575
+ cnt = max(contours, key=cv2.contourArea)
576
+ poly_area_px2 = float(cv2.contourArea(cnt))
577
+ area_cm2 = round(poly_area_px2 / (max(px_per_cm, 1e-6) ** 2), 2)
578
+ return area_cm2, cnt
579
+
580
+ def clamp_area_with_minrect(cnt: np.ndarray, px_per_cm: float, area_cm2_poly: float) -> float:
581
+ rect = cv2.minAreaRect(cnt)
582
+ (w_px, h_px) = rect[1]
583
+ rect_area_px2 = float(max(w_px, 0.0) * max(h_px, 0.0))
584
+ rect_area_cm2 = rect_area_px2 / (max(px_per_cm, 1e-6) ** 2)
585
+ return round(min(area_cm2_poly, rect_area_cm2 * 1.05), 2)
586
+
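
A measurement sanity check (sketch): a 76x38 px filled rectangle at 38 px/cm should come out near 2 cm by 1 cm with roughly 2 cm² of area:

```python
m = np.zeros((200, 200), np.uint8)
m[50:88, 60:136] = 1                                   # 38 px tall, 76 px wide
length_cm, breadth_cm, (_box, _center) = measure_min_area_rect(m, px_per_cm=38.0)
area_cm2, _cnt = area_cm2_from_contour(m, px_per_cm=38.0)
print(length_cm, breadth_cm, area_cm2)                 # ≈ 2.0, 1.0 and ~1.9
```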
587
+ def draw_measurement_overlay(
588
+ base_bgr: np.ndarray,
589
+ mask01: np.ndarray,
590
+ rect_box: np.ndarray,
591
+ length_cm: float,
592
+ breadth_cm: float,
593
+ thickness: int = 2
594
+ ) -> np.ndarray:
595
+ """
596
+ 1) Strong red mask overlay + white contour
597
+ 2) Min-area rectangle
598
+ 3) Double-headed arrows labeled Length/Width
599
+ """
600
+ overlay = base_bgr.copy()
601
+
602
+ # Mask tint
603
+ mask255 = (mask01 * 255).astype(np.uint8)
604
+ mask3 = cv2.merge([mask255, mask255, mask255])
605
+ red = np.zeros_like(overlay); red[:] = (0, 0, 255)
606
+ alpha = 0.55
607
+ tinted = cv2.addWeighted(overlay, 1 - alpha, red, alpha, 0)
608
+ overlay = np.where(mask3 > 0, tinted, overlay)
609
+
610
+ # Contour
611
+ cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
612
+ if cnts:
613
+ cv2.drawContours(overlay, cnts, -1, (255, 255, 255), 2)
614
+
615
+ if rect_box is not None:
616
+ cv2.polylines(overlay, [rect_box], True, (255, 255, 255), thickness)
617
+ pts = rect_box.reshape(-1, 2)
618
+
619
+ def midpoint(a, b): return (int((a[0] + b[0]) / 2), int((a[1] + b[1]) / 2))
620
+ e = [np.linalg.norm(pts[i] - pts[(i + 1) % 4]) for i in range(4)]
621
+ long_edge_idx = int(np.argmax(e))
622
+ mids = [midpoint(pts[i], pts[(i + 1) % 4]) for i in range(4)]
623
+ long_pair = (long_edge_idx, (long_edge_idx + 2) % 4)
624
+ short_pair = ((long_edge_idx + 1) % 4, (long_edge_idx + 3) % 4)
625
+
626
+ def draw_double_arrow(img, p1, p2):
627
+ cv2.arrowedLine(img, p1, p2, (0, 0, 0), thickness + 2, tipLength=0.05)
628
+ cv2.arrowedLine(img, p2, p1, (0, 0, 0), thickness + 2, tipLength=0.05)
629
+ cv2.arrowedLine(img, p1, p2, (255, 255, 255), thickness, tipLength=0.05)
630
+ cv2.arrowedLine(img, p2, p1, (255, 255, 255), thickness, tipLength=0.05)
631
+
632
+ def put_label(text, anchor):
633
+ org = (anchor[0] + 6, anchor[1] - 6)
634
+ cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 4, cv2.LINE_AA)
635
+ cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
636
+
637
+ draw_double_arrow(overlay, mids[long_pair[0]], mids[long_pair[1]])
638
+ draw_double_arrow(overlay, mids[short_pair[0]], mids[short_pair[1]])
639
+ put_label(f"Length: {length_cm:.2f} cm", mids[long_pair[0]])
640
+ put_label(f"Width: {breadth_cm:.2f} cm", mids[short_pair[0]])
641
+
642
+ return overlay
643
+
644
+ # ---------- AI PROCESSOR ----------
645
  class AIProcessor:
646
  def __init__(self):
647
+ self.models_cache = models_cache
648
+ self.knowledge_base_cache = knowledge_base_cache
649
+ self.uploads_dir = UPLOADS_DIR
650
+ self.dataset_id = DATASET_ID
651
+ self.hf_token = HF_TOKEN
652
 
653
+ def _ensure_analysis_dir(self) -> str:
654
+ out_dir = os.path.join(self.uploads_dir, "analysis")
655
+ os.makedirs(out_dir, exist_ok=True)
656
+ return out_dir
657
+
658
+ def perform_visual_analysis(self, image_pil: Image.Image) -> Dict:
659
+ """
660
+ YOLO detect → crop ROI → segment_wound(ROI) → clean mask →
661
+ minAreaRect measurement (cm) using EXIF px/cm → save outputs.
662
+ """
663
  try:
664
+ px_per_cm, exif_meta = estimate_px_per_cm_from_exif(image_pil, DEFAULT_PX_PER_CM)
665
+ # Guardrails for calibration to avoid huge area blow-ups
666
+ px_per_cm = float(np.clip(px_per_cm, 20.0, 350.0))
667
+ if (exif_meta or {}).get("used") != "exif":
668
+ logging.warning(f"Calibration fallback used: px_per_cm={px_per_cm:.2f} (default). Prefer a ruler/ArUco marker for accuracy.")
669
 
670
+ image_cv = cv2.cvtColor(np.array(image_pil.convert("RGB")), cv2.COLOR_RGB2BGR)
 
671
 
672
+ # --- Detection ---
673
+ det_model = self.models_cache.get("det")
674
+ if det_model is None:
675
+ raise RuntimeError("YOLO model not loaded")
676
+ # Force CPU inference and avoid CUDA touch
677
+ results = det_model.predict(image_cv, verbose=False, device="cpu")
678
+ if (not results) or (not getattr(results[0], "boxes", None)) or (len(results[0].boxes) == 0):
679
+ try:
+ import gradio as gr
+ except Exception:
+ gr = None
+ # surface a Gradio-friendly error when gradio is importable, else a plain RuntimeError
+ raise (gr.Error if gr else RuntimeError)("No wound could be detected.")
684
 
685
+ box = results[0].boxes[0].xyxy[0].cpu().numpy().astype(int)
686
+ x1, y1, x2, y2 = [int(v) for v in box]
687
+ x1, y1 = max(0, x1), max(0, y1)
688
+ x2, y2 = min(image_cv.shape[1], x2), min(image_cv.shape[0], y2)
689
+ roi = image_cv[y1:y2, x1:x2].copy()
690
+ if roi.size == 0:
691
+ try:
692
+ import gradio as gr
693
+ raise gr.Error("Detected ROI is empty.")
694
+ except Exception:
695
+ raise RuntimeError("Detected ROI is empty.")
696
 
697
+ out_dir = self._ensure_analysis_dir()
698
+ ts = datetime.now().strftime("%Y%m%d_%H%M%S")
699
+
700
+ # --- Segmentation (model-first + KMeans fallback) ---
701
+ mask_u8_255, seg_debug = segment_wound(roi, ts, out_dir)
702
+ mask01 = (mask_u8_255 > 127).astype(np.uint8)
703
+
704
+ if mask01.any():
705
+ mask01 = _clean_mask(mask01)
706
+ logging.debug(f"Mask postproc: px_after={int(mask01.sum())}")
707
+
708
+ # --- Measurement (accurate & conservative) ---
709
+ if mask01.any():
710
+ length_cm, breadth_cm, (box_pts, _) = measure_min_area_rect(mask01, px_per_cm)
711
+ area_poly_cm2, largest_cnt = area_cm2_from_contour(mask01, px_per_cm)
712
+ if largest_cnt is not None:
713
+ surface_area_cm2 = clamp_area_with_minrect(largest_cnt, px_per_cm, area_poly_cm2)
714
+ else:
715
+ surface_area_cm2 = area_poly_cm2
716
+
717
+ anno_roi = draw_measurement_overlay(roi, mask01, box_pts, length_cm, breadth_cm)
718
+ segmentation_empty = False
719
+ else:
720
+ # Fallback if seg failed: use ROI dimensions
721
+ h_px = max(0, y2 - y1); w_px = max(0, x2 - x1)
722
+ length_cm = round(max(h_px, w_px) / px_per_cm, 2)
723
+ breadth_cm = round(min(h_px, w_px) / px_per_cm, 2)
724
+ surface_area_cm2 = round((h_px * w_px) / (px_per_cm ** 2), 2)
725
+ anno_roi = roi.copy()
726
+ cv2.rectangle(anno_roi, (2, 2), (anno_roi.shape[1]-3, anno_roi.shape[0]-3), (0, 0, 255), 3)
727
+ cv2.line(anno_roi, (0, 0), (anno_roi.shape[1]-1, anno_roi.shape[0]-1), (0, 0, 255), 2)
728
+ cv2.line(anno_roi, (anno_roi.shape[1]-1, 0), (0, anno_roi.shape[0]-1), (0, 0, 255), 2)
729
+ box_pts = None
730
+ segmentation_empty = True
731
+
732
+ # --- Save visualizations ---
733
+ original_path = os.path.join(out_dir, f"original_{ts}.png")
734
+ cv2.imwrite(original_path, image_cv)
735
 
736
+ det_vis = image_cv.copy()
737
+ cv2.rectangle(det_vis, (x1, y1), (x2, y2), (0, 255, 0), 2)
738
+ detection_path = os.path.join(out_dir, f"detection_{ts}.png")
739
+ cv2.imwrite(detection_path, det_vis)
740
 
741
+ roi_mask_path = os.path.join(out_dir, f"roi_mask_{ts}.png")
742
+ cv2.imwrite(roi_mask_path, (mask01 * 255).astype(np.uint8))
743
+
744
+ # ROI overlay (mask tint + contour, without arrows)
745
+ mask255 = (mask01 * 255).astype(np.uint8)
746
+ mask3 = cv2.merge([mask255, mask255, mask255])
747
+ red = np.zeros_like(roi); red[:] = (0, 0, 255)
748
+ alpha = 0.55
749
+ tinted = cv2.addWeighted(roi, 1 - alpha, red, alpha, 0)
750
+ if mask255.any():
751
+ roi_overlay = np.where(mask3 > 0, tinted, roi)
752
+ cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
753
+ cv2.drawContours(roi_overlay, cnts, -1, (255, 255, 255), 2)
754
+ else:
755
+ roi_overlay = anno_roi
756
+
757
+ seg_full = image_cv.copy()
758
+ seg_full[y1:y2, x1:x2] = roi_overlay
759
+ segmentation_path = os.path.join(out_dir, f"segmentation_{ts}.png")
760
+ cv2.imwrite(segmentation_path, seg_full)
761
+
762
+ segmentation_roi_path = os.path.join(out_dir, f"segmentation_roi_{ts}.png")
763
+ cv2.imwrite(segmentation_roi_path, roi_overlay)
764
+
765
+ # Annotated (mask + arrows + labels) in full-frame
766
+ anno_full = image_cv.copy()
767
+ anno_full[y1:y2, x1:x2] = anno_roi
768
+ annotated_seg_path = os.path.join(out_dir, f"segmentation_annotated_{ts}.png")
769
+ cv2.imwrite(annotated_seg_path, anno_full)
770
+
771
+ # --- Optional classification ---
772
+ wound_type = "Unknown"
773
+ cls_pipe = self.models_cache.get("cls")
774
+ if cls_pipe is not None:
775
+ try:
776
+ preds = cls_pipe(Image.fromarray(cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)))
777
+ if preds:
778
+ wound_type = max(preds, key=lambda x: x.get("score", 0)).get("label", "Unknown")
779
+ except Exception as e:
780
+ logging.warning(f"Classification failed: {e}")
781
+
782
+ # Log end-of-seg summary
783
+ seg_summary = {
784
+ "seg_used": seg_debug.get("used"),
785
+ "seg_reason": seg_debug.get("reason"),
786
+ "positive_fraction": round(float(seg_debug.get("positive_fraction", 0.0)), 6),
787
+ "threshold": seg_debug.get("thr"),
788
+ "segmentation_empty": segmentation_empty,
789
+ "exif_px_per_cm": round(px_per_cm, 3),
790
+ }
791
+ _log_kv("SEG_SUMMARY", seg_summary)
792
+
793
+ return {
794
+ "wound_type": wound_type,
795
+ "length_cm": length_cm,
796
+ "breadth_cm": breadth_cm,
797
+ "surface_area_cm2": surface_area_cm2,
798
+ "px_per_cm": round(px_per_cm, 2),
799
+ "calibration_meta": exif_meta,
800
+ "detection_confidence": float(results[0].boxes.conf[0].cpu().item())
801
+ if getattr(results[0].boxes, "conf", None) is not None else 0.0,
802
+ "detection_image_path": detection_path,
803
+ "segmentation_image_path": annotated_seg_path,
804
+ "segmentation_annotated_path": annotated_seg_path,
805
+ "segmentation_roi_path": segmentation_roi_path,
806
+ "roi_mask_path": roi_mask_path,
807
+ "segmentation_empty": segmentation_empty,
808
+ "segmentation_debug": seg_debug,
809
+ "original_image_path": original_path,
810
+ }
811
  except Exception as e:
812
+ logging.error(f"Visual analysis failed: {e}", exc_info=True)
813
+ raise
814
 
815
+ # ---------- Knowledge base + reporting ----------
816
+ def query_guidelines(self, query: str) -> str:
817
  try:
818
+ vs = self.knowledge_base_cache.get("vector_store")
819
+ if not vs:
820
+ return "Knowledge base is not available."
821
+ retriever = vs.as_retriever(search_kwargs={"k": 5})
822
+ # Modern API (avoid get_relevant_documents deprecation)
823
+ docs = retriever.invoke(query)
824
+ lines: List[str] = []
825
+ for d in docs:
826
+ src = (d.metadata or {}).get("source", "N/A")
827
+ txt = (d.page_content or "")[:300]
828
+ lines.append(f"Source: {src}\nContent: {txt}...")
829
+ return "\n\n".join(lines) if lines else "No relevant guideline snippets found."
 
830
  except Exception as e:
831
+ logging.warning(f"Guidelines query failed: {e}")
832
+ return f"Guidelines query failed: {str(e)}"
833
+
834
+ def _generate_fallback_report(self, patient_info: str, visual_results: Dict, guideline_context: str) -> str:
835
+ return f"""# 🩺 SmartHeal AI - Comprehensive Wound Analysis Report
836
+
837
+ ## 📋 Patient Information
838
  {patient_info}
839
 
840
+ ## 🔍 Visual Analysis Results
841
+ - **Wound Type**: {visual_results.get('wound_type', 'Unknown')}
842
+ - **Dimensions**: {visual_results.get('length_cm', 0)} cm × {visual_results.get('breadth_cm', 0)} cm
843
+ - **Surface Area**: {visual_results.get('surface_area_cm2', 0)} cm²
844
+ - **Detection Confidence**: {visual_results.get('detection_confidence', 0):.1%}
845
+ - **Calibration**: {visual_results.get('px_per_cm','?')} px/cm ({(visual_results.get('calibration_meta') or {}).get('used','default')})
846
+
847
+ ## 📊 Analysis Images
848
+ - **Original**: {visual_results.get('original_image_path', 'N/A')}
849
+ - **Detection**: {visual_results.get('detection_image_path', 'N/A')}
850
+ - **Segmentation**: {visual_results.get('segmentation_image_path', 'N/A')}
851
+ - **Annotated**: {visual_results.get('segmentation_annotated_path', 'N/A')}
852
+
853
+ ## 🎯 Clinical Summary
854
+ Automated analysis provides quantitative measurements; verify via clinical examination.
855
+
856
+ ## 💊 Recommendations
857
+ - Cleanse wound gently; select dressing per exudate/infection risk
858
+ - Debride necrotic tissue if indicated (clinical decision)
859
+ - Document with serial photos and measurements
860
+
861
+ ## 📅 Monitoring
862
+ - Daily in week 1, then every 2–3 days (or as indicated)
863
+ - Weekly progress review
864
 
865
+ ## 📚 Guideline Context
866
+ {(guideline_context or '')[:800]}{"..." if guideline_context and len(guideline_context) > 800 else ''}
867
+
868
+ **Disclaimer:** Automated, for decision support only. Verify clinically.
869
+ """
870
+
871
+ def generate_final_report(
872
+ self,
873
+ patient_info: str,
874
+ visual_results: Dict,
875
+ guideline_context: str,
876
+ image_pil: Image.Image,
877
+ max_new_tokens: Optional[int] = None,
878
+ ) -> str:
879
  try:
880
+ report = generate_medgemma_report(
881
+ patient_info, visual_results, guideline_context, image_pil, max_new_tokens
 
 
882
  )
883
+ if report and report.strip() and not report.startswith(("⚠️", "❌")):
884
+ return report
885
+ logging.warning("VLM unavailable/invalid; using fallback.")
886
+ return self._generate_fallback_report(patient_info, visual_results, guideline_context)
887
+ except Exception as e:
888
+ logging.error(f"Report generation failed: {e}")
889
+ return self._generate_fallback_report(patient_info, visual_results, guideline_context)
890
+
891
+ def save_and_commit_image(self, image_pil: Image.Image) -> str:
892
+ try:
893
+ os.makedirs(self.uploads_dir, exist_ok=True)
894
+ ts = datetime.now().strftime("%Y%m%d_%H%M%S")
895
+ filename = f"{ts}.png"
896
+ path = os.path.join(self.uploads_dir, filename)
897
+ image_pil.convert("RGB").save(path)
898
+ logging.info(f"✅ Image saved locally: {path}")
899
+
900
+ if HF_TOKEN and DATASET_ID:
901
+ try:
902
+ HfApi, HfFolder = _import_hf_hub()
903
+ HfFolder.save_token(HF_TOKEN)
904
+ api = HfApi()
905
+ api.upload_file(
906
+ path_or_fileobj=path,
907
+ path_in_repo=f"images/{filename}",
908
+ repo_id=DATASET_ID,
909
+ repo_type="dataset",
910
+ token=HF_TOKEN,
911
+ commit_message=f"Upload wound image: {filename}",
912
+ )
913
+ logging.info("✅ Image committed to HF dataset")
914
+ except Exception as e:
915
+ logging.warning(f"HF upload failed: {e}")
916
+
917
+ return path
918
  except Exception as e:
919
+ logging.error(f"Failed to save/commit image: {e}")
920
+ return ""
921
+
922
+ @_SPACES_GPU(enable_queue=True)
923
+ def full_analysis_pipeline(self, image_pil: Image.Image, questionnaire_data: Dict) -> Dict:
924
  try:
925
+ saved_path = self.save_and_commit_image(image_pil)
926
+ visual_results = self.perform_visual_analysis(image_pil)
927
+
928
+ pi = questionnaire_data or {}
929
+ patient_info = (
930
+ f"Age: {pi.get('age','N/A')}, "
931
+ f"Diabetic: {pi.get('diabetic','N/A')}, "
932
+ f"Allergies: {pi.get('allergies','N/A')}, "
933
+ f"Date of Wound: {pi.get('date_of_injury','N/A')}, "
934
+ f"Professional Care: {pi.get('professional_care','N/A')}, "
935
+ f"Oozing/Bleeding: {pi.get('oozing_bleeding','N/A')}, "
936
+ f"Infection: {pi.get('infection','N/A')}, "
937
+ f"Moisture: {pi.get('moisture','N/A')}"
938
+ )
939
+
940
+ query = (
941
+ f"best practices for managing a {visual_results.get('wound_type','Unknown')} "
942
+ f"with moisture '{pi.get('moisture','unknown')}' and infection '{pi.get('infection','unknown')}' "
943
+ f"in a diabetic status '{pi.get('diabetic','unknown')}'"
944
+ )
945
  guideline_context = self.query_guidelines(query)
946
 
947
+ report = self.generate_final_report(patient_info, visual_results, guideline_context, image_pil)
948
+
949
+ return {
950
+ "success": True,
951
+ "visual_analysis": visual_results,
952
+ "report": report,
953
+ "saved_image_path": saved_path,
954
+ "guideline_context": (guideline_context or "")[:500] + (
955
+ "..." if guideline_context and len(guideline_context) > 500 else ""
956
+ ),
957
+ }
958
+ except Exception as e:
959
+ logging.error(f"Pipeline error: {e}")
960
+ return {
961
+ "success": False,
962
+ "error": str(e),
963
+ "visual_analysis": {},
964
+ "report": f"Analysis failed: {str(e)}",
965
+ "saved_image_path": None,
966
+ "guideline_context": "",
967
+ }
968
+
969
+ def analyze_wound(self, image, questionnaire_data: Dict) -> Dict:
970
+ try:
971
+ if isinstance(image, str):
972
+ if not os.path.exists(image):
973
+ raise ValueError(f"Image file not found: {image}")
974
+ image_pil = Image.open(image)
975
+ elif isinstance(image, Image.Image):
976
+ image_pil = image
977
+ elif isinstance(image, np.ndarray):
978
+ image_pil = Image.fromarray(image)
979
+ else:
980
+ raise ValueError(f"Unsupported image type: {type(image)}")
981
 
982
+ return self.full_analysis_pipeline(image_pil, questionnaire_data or {})
983
  except Exception as e:
984
+ logging.error(f"Wound analysis error: {e}")
985
+ return {
986
+ "success": False,
987
+ "error": str(e),
988
+ "visual_analysis": {},
989
+ "report": f"Analysis initialization failed: {str(e)}",
990
+ "saved_image_path": None,
991
+ "guideline_context": "",
992
+ }
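
A hedged end-to-end sketch of how the processor might be driven; the image path and questionnaire fields are hypothetical:

```python
if __name__ == "__main__":
    processor = AIProcessor()
    result = processor.analyze_wound(
        "sample_wound.png",                                              # hypothetical file
        {"age": 62, "diabetic": "yes", "moisture": "moderate", "infection": "no"},
    )
    print(result["success"])
    print(result["report"][:400])
```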