yassine-mhirsi committed
Commit c90fc3f · 1 Parent(s): 9c149e4

Add multi-car detection functionality to Gradio app

- Introduced methods to retrieve and process multi-car detection videos.
- Added a new tab in the Gradio interface for multi-car detection, allowing users to select and process videos.
- Updated configuration to include the multi-car detection model.
- Enhanced result formatting to provide detailed statistics and summaries for processed videos.

app/gradio_app.py CHANGED
@@ -10,6 +10,7 @@ import logging
  from app.services.pipeline import get_pipeline
  from app.utils.image_processing import numpy_to_pil
  from app.models.state_farm_model import get_state_farm_detector
+ from app.services.multi_car_pipeline import get_multi_car_pipeline
  import os
  import glob

@@ -207,6 +208,82 @@ def get_state_farm_files():
      return files


+ def get_multi_car_videos():
+     """Get the list of available videos from datasets/multi-car/."""
+     base_path = "datasets/multi-car"
+     if not os.path.exists(base_path):
+         return []
+
+     # Collect all video files, matching lower- and upper-case extensions
+     video_extensions = ['*.mp4', '*.avi', '*.mov', '*.mkv']
+
+     files = []
+     for ext in video_extensions:
+         files.extend(glob.glob(os.path.join(base_path, ext)))
+         files.extend(glob.glob(os.path.join(base_path, ext.upper())))
+
+     # Sort and return relative paths
+     files = sorted([os.path.relpath(f) for f in files])
+     return files
+
+
+ def process_multi_car_video(video_path: str) -> Tuple:
+     """
+     Process a video with multi-car detection.
+
+     Args:
+         video_path: Path to the video file
+
+     Returns:
+         Tuple of (output_video_path, result_text)
+     """
+     if not video_path or not os.path.exists(video_path):
+         return None, "Please select a video from the dropdown"
+
+     logger.info(f"🎨 Gradio: Processing Multi-Car detection - Video: {video_path}")
+     try:
+         # Get the pipeline
+         pipeline = get_multi_car_pipeline()
+
+         # Process the video
+         result = pipeline.process_video(video_path)
+
+         if not result['success']:
+             error_msg = result.get('error', 'Processing failed')
+             return None, f"**Error:** {error_msg}"
+
+         # Get the detection summary
+         summary = pipeline.get_detection_summary(result['detections_per_frame'])
+
+         # Format the result text
+         result_text = f"""
+ ## Video Processing Complete
+
+ ### **Output Video:** {os.path.basename(result['output_path'])}
+
+ ---
+
+ ### 📊 Detection Statistics:
+ - **Total Frames Processed:** {result['total_frames']}
+ - **Total Detections:** {summary['total_detections']}
+ - **Average Detections per Frame:** {summary['average_detections_per_frame']:.2f}
+ - **Max Detections in a Frame:** {summary['max_detections_per_frame']}
+
+ ### 🎯 Detected Classes:
+ """
+         for class_name, count in summary['class_counts'].items():
+             result_text += f"- **{class_name}:** {count} detections\n"
+
+         result_text += f"\n---\n\n### ⏱️ Processing Time: {result['processing_time']:.2f}s"
+
+         return result['output_path'], result_text
+
+     except Exception as e:
+         error_msg = f"Error processing video: {str(e)}"
+         logger.error(error_msg)
+         return None, f"**Error:** {error_msg}"
+
+
  def process_state_farm(file_path: str) -> Tuple:
      """
      Process State Farm distracted driver detection.

@@ -428,6 +505,81 @@ def create_interface():
                  inputs=[file_selector],
                  outputs=[output_image_state_farm, result_text_state_farm]
              )
+
+         # Multi-Car Detection Tab
+         with gr.Tab("Multi-Car Detection"):
+             gr.Markdown("""
+ # 🚗 Multi-Car and Driver Detection
+
+ Select a video from the pre-loaded dataset to detect multiple cars and drivers.
+
+ **Model:** YOLO (Multiple Car Detection)
+
+ The model will process the video frame by frame and detect:
+ - Multiple cars
+ - Drivers
+ - Other objects as defined by the model
+
+ The output video will show bounding boxes and labels for all detected objects.
+ """)
+
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     # Video selector dropdown
+                     available_videos = get_multi_car_videos()
+                     if not available_videos:
+                         gr.Markdown("⚠️ **No videos found in datasets/multi-car/ directory**")
+                         video_selector = gr.Dropdown(
+                             choices=[],
+                             label="Select Video",
+                             interactive=False
+                         )
+                     else:
+                         video_selector = gr.Dropdown(
+                             choices=available_videos,
+                             label="Select Video",
+                             value=available_videos[0],
+                             interactive=True
+                         )
+
+                     process_video_button = gr.Button("🎬 Process Video", variant="primary", size="lg")
+                     result_text_multi_car = gr.Markdown()
+
+                 with gr.Column(scale=1):
+                     output_video_multi_car = gr.Video(label="Annotated Video Output")
+
+             def update_video_display(video_path):
+                 """Update the video display when a file is selected."""
+                 if not video_path or not os.path.exists(video_path):
+                     return None, "**Please select a video from the dropdown.**"
+
+                 return video_path, f"**Video selected:** {os.path.basename(video_path)}\n\nClick 'Process Video' to detect cars and drivers."
+
+             def process_video_and_display(video_path):
+                 """Process the video and return the results."""
+                 if not video_path:
+                     return None, "Please select a video"
+
+                 output_video, result_text = process_multi_car_video(video_path)
+
+                 if output_video and os.path.exists(output_video):
+                     return output_video, result_text
+                 else:
+                     return None, result_text
+
+             # Update the display when a video is selected
+             video_selector.change(
+                 fn=update_video_display,
+                 inputs=[video_selector],
+                 outputs=[output_video_multi_car, result_text_multi_car]
+             )
+
+             # Process when the button is clicked
+             process_video_button.click(
+                 fn=process_video_and_display,
+                 inputs=[video_selector],
+                 outputs=[output_video_multi_car, result_text_multi_car]
+             )

      # Footer
      gr.Markdown("""
@@ -435,12 +587,13 @@ def create_interface():

  ### 📚 About

- This application uses five state-of-the-art models:
+ This application uses six state-of-the-art models:
  - **Car Detection**: `Safe-Drive-TN/Car-detection-from-scratch` (Custom CNN)
  - **Plate Detection**: `Safe-Drive-TN/Tunisian-Licence-plate-Detection` (YOLOv8n)
  - **Word Detection**: `Safe-Drive-TN/tunis-word-detection-yolov8s` (YOLOv8s)
  - **OCR**: `microsoft/trocr-base-printed` (TrOCR)
  - **State Farm Detection**: `Safe-Drive-TN/State-farm-detection` (YOLOv8n-cls)
+ - **Multi-Car Detection**: `Safe-Drive-TN/Multiple-Car-Detection` (YOLO)

  Made with ❤️
  """)
app/models/multi_car_detector.py ADDED
@@ -0,0 +1,240 @@
+ """
+ Multiple Car and Driver Detection model using YOLO from HuggingFace.
+ """
+ import numpy as np
+ from typing import Dict, List, Optional
+ from ultralytics import YOLO
+ from huggingface_hub import hf_hub_download
+
+ from app.utils.config import MULTI_CAR_DETECTION_MODEL, HF_TOKEN
+
+
+ class MultiCarDetector:
+     """
+     Detects multiple cars and drivers in images/videos using YOLO.
+     Model hosted on HuggingFace: Safe-Drive-TN/Multiple-Car-Detection
+     """
+
+     def __init__(self, confidence_threshold: float = 0.25):
+         """Initialize the multi-car detector model."""
+         self.model = None
+         self.confidence_threshold = confidence_threshold
+
+     def load_model(self):
+         """Load the YOLO model from HuggingFace."""
+         if self.model is not None:
+             return
+
+         try:
+             # Download the model file from HuggingFace
+             model_path = hf_hub_download(
+                 repo_id=MULTI_CAR_DETECTION_MODEL,
+                 filename="Multiple-Car-Detection/Muliple_Car_Detection.pt",
+                 token=HF_TOKEN
+             )
+
+             # Load the YOLO model from the downloaded file
+             self.model = YOLO(model_path)
+             print(f"Multi-car detection model loaded successfully from {MULTI_CAR_DETECTION_MODEL}")
+
+         except Exception as e:
+             print(f"Error loading multi-car detection model: {e}")
+             raise
+
+     def detect(self, image: np.ndarray) -> List[Dict]:
+         """
+         Detect cars and drivers in an image.
+
+         Args:
+             image: Input image as a numpy array (BGR format)
+
+         Returns:
+             List of dictionaries, each containing:
+             - bbox: Bounding box as [x1, y1, x2, y2]
+             - confidence: Detection confidence score
+             - class_id: Class ID
+             - class_name: Class name (if available)
+         """
+         if self.model is None:
+             self.load_model()
+
+         try:
+             # Run inference
+             results = self.model(image, conf=self.confidence_threshold, verbose=False)
+
+             # Bail out early if there are no detections
+             if len(results) == 0 or len(results[0].boxes) == 0:
+                 return []
+
+             # Collect all detections
+             boxes = results[0].boxes
+             detections = []
+
+             # Get class names if available
+             class_names = self.model.names if hasattr(self.model, 'names') else {}
+
+             for box in boxes:
+                 bbox = box.xyxy[0].cpu().numpy().tolist()  # [x1, y1, x2, y2]
+                 confidence = float(box.conf[0].cpu().numpy())
+                 class_id = int(box.cls[0].cpu().numpy())
+                 class_name = class_names.get(class_id, f"class_{class_id}")
+
+                 detections.append({
+                     'bbox': bbox,
+                     'confidence': confidence,
+                     'class_id': class_id,
+                     'class_name': class_name
+                 })
+
+             # Sort by confidence (highest first)
+             detections.sort(key=lambda x: x['confidence'], reverse=True)
+
+             return detections
+
+         except Exception as e:
+             print(f"Error during multi-car detection: {e}")
+             return []
+
+     def predict_video(self, video_path: str, save_path: Optional[str] = None) -> Dict:
+         """
+         Process a video and return the annotated video path.
+
+         Args:
+             video_path: Path to input video file
+             save_path: Optional path to save the annotated video (if None, auto-generates)
+
+         Returns:
+             Dictionary containing:
+             - output_path: Path to annotated video
+             - total_frames: Total number of frames processed
+             - detections_per_frame: List of detections per frame
+         """
+         if self.model is None:
+             self.load_model()
+
+         try:
+             import os
+             from pathlib import Path
+
+             # Determine the output path
+             if save_path is None:
+                 # Create the output directory
+                 output_dir = Path("output/multi_car_detection")
+                 output_dir.mkdir(parents=True, exist_ok=True)
+
+                 # Generate the output filename from the input filename
+                 input_filename = Path(video_path).stem
+                 save_path = str(output_dir / f"{input_filename}_annotated.mp4")
+
+             # Ensure the output directory exists
+             output_dir = Path(save_path).parent
+             output_dir.mkdir(parents=True, exist_ok=True)
+
+             # Use YOLO's built-in video processing with visualization.
+             # predict is used instead of track for more reliable output path control.
+             results = self.model.predict(
+                 source=video_path,
+                 conf=self.confidence_threshold,
+                 save=True,
+                 save_txt=False,
+                 save_conf=True,
+                 project=str(output_dir.parent),
+                 name=output_dir.name,
+                 exist_ok=True,
+                 verbose=False
+             )
+
+             # YOLO saves videos with the same name as the input in the output
+             # directory. Try to find the output video.
+             input_filename = Path(video_path).stem
+             possible_outputs = [
+                 output_dir / f"{input_filename}.mp4",
+                 output_dir / f"{input_filename}.avi",
+                 Path("runs/detect") / output_dir.name / f"{input_filename}.mp4",
+                 Path("runs/detect") / output_dir.name / f"{input_filename}.avi",
+             ]
+
+             output_path = None
+             for possible_path in possible_outputs:
+                 if possible_path.exists():
+                     # If a specific save_path was requested, copy the file there
+                     if str(possible_path) != save_path:
+                         import shutil
+                         shutil.copy2(possible_path, save_path)
+                     output_path = save_path
+                     break
+
+             # If still not found, search for any video files in the output directory
+             if output_path is None:
+                 video_files = list(output_dir.glob("*.mp4")) + list(output_dir.glob("*.avi"))
+                 if video_files:
+                     # Use the most recently modified one
+                     output_path = str(max(video_files, key=lambda p: p.stat().st_mtime))
+                     if str(output_path) != save_path:
+                         import shutil
+                         shutil.copy2(output_path, save_path)
+                         output_path = save_path
+
+             # Count frames and collect detection stats
+             total_frames = len(results) if isinstance(results, list) else 1
+             detections_per_frame = []
+
+             for result in results:
+                 frame_detections = []
+                 if hasattr(result, 'boxes') and result.boxes is not None:
+                     for box in result.boxes:
+                         bbox = box.xyxy[0].cpu().numpy().tolist()
+                         confidence = float(box.conf[0].cpu().numpy())
+                         class_id = int(box.cls[0].cpu().numpy())
+                         class_names = self.model.names if hasattr(self.model, 'names') else {}
+                         class_name = class_names.get(class_id, f"class_{class_id}")
+
+                         frame_detections.append({
+                             'bbox': bbox,
+                             'confidence': confidence,
+                             'class_id': class_id,
+                             'class_name': class_name
+                         })
+                 detections_per_frame.append(frame_detections)
+
+             if output_path is None:
+                 return {
+                     'output_path': None,
+                     'total_frames': total_frames,
+                     'detections_per_frame': detections_per_frame,
+                     'success': False,
+                     'error': 'Could not locate output video file'
+                 }
+
+             return {
+                 'output_path': output_path,
+                 'total_frames': total_frames,
+                 'detections_per_frame': detections_per_frame,
+                 'success': True
+             }
+
+         except Exception as e:
+             print(f"Error during video processing: {e}")
+             import traceback
+             traceback.print_exc()
+             return {
+                 'output_path': None,
+                 'total_frames': 0,
+                 'detections_per_frame': [],
+                 'success': False,
+                 'error': str(e)
+             }
+
+
+ # Global instance
+ _multi_car_detector = None
+
+
+ def get_multi_car_detector(confidence_threshold: float = 0.25) -> MultiCarDetector:
+     """Get or create the global multi-car detector instance."""
+     global _multi_car_detector
+     if _multi_car_detector is None:
+         _multi_car_detector = MultiCarDetector(confidence_threshold=confidence_threshold)
+         _multi_car_detector.load_model()
+     return _multi_car_detector
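
A single-frame usage sketch for the new detector — hedged: `cv2` and the image path below are illustrative assumptions, not part of the commit. `detect()` returns detections sorted by confidence, highest first:

```python
# Illustrative single-image run of MultiCarDetector; the image path is hypothetical.
import cv2  # OpenCV, assumed available; detect() expects a BGR numpy array
from app.models.multi_car_detector import get_multi_car_detector

detector = get_multi_car_detector(confidence_threshold=0.25)
frame = cv2.imread("datasets/multi-car/sample_frame.jpg")  # placeholder path
if frame is None:
    raise FileNotFoundError("adjust the image path above")

for det in detector.detect(frame):
    x1, y1, x2, y2 = map(int, det['bbox'])
    print(f"{det['class_name']}: {det['confidence']:.2f} at [{x1}, {y1}, {x2}, {y2}]")
```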
app/services/multi_car_pipeline.py ADDED
@@ -0,0 +1,128 @@
+ """
+ Multi-car video detection pipeline service.
+ """
+ import os
+ import time
+ from typing import Dict, Optional
+ from pathlib import Path
+
+ from app.models.multi_car_detector import get_multi_car_detector
+
+
+ class MultiCarVideoPipeline:
+     """
+     Pipeline for processing videos with multi-car and driver detection.
+     Processes videos frame by frame and returns an annotated video with detections.
+     """
+
+     def __init__(self, confidence_threshold: float = 0.25):
+         """Initialize the pipeline with the multi-car detector."""
+         self.detector = get_multi_car_detector(confidence_threshold=confidence_threshold)
+
+     def process_video(self, video_path: str, output_path: Optional[str] = None) -> Dict:
+         """
+         Process a video and return an annotated video with detections.
+
+         Args:
+             video_path: Path to input video file
+             output_path: Optional path to save the annotated video
+                 (if None, a path under output/multi_car_detection is generated)
+
+         Returns:
+             Dictionary containing:
+             - success: Boolean indicating if processing was successful
+             - output_path: Path to annotated video
+             - total_frames: Total number of frames processed
+             - detections_per_frame: List of detections per frame
+             - processing_time: Time taken to process the video
+             - error: Error message (if failed)
+         """
+         result = {
+             'success': False,
+             'output_path': None,
+             'total_frames': 0,
+             'detections_per_frame': [],
+             'processing_time': 0.0,
+             'error': None
+         }
+
+         if not os.path.exists(video_path):
+             result['error'] = f"Video file not found: {video_path}"
+             return result
+
+         start_time = time.time()
+
+         try:
+             # If no output path is specified, generate one
+             if output_path is None:
+                 # Create the output directory if it doesn't exist
+                 output_dir = Path("output/multi_car_detection")
+                 output_dir.mkdir(parents=True, exist_ok=True)
+
+                 # Generate the output filename from the input filename
+                 input_filename = Path(video_path).stem
+                 output_path = str(output_dir / f"{input_filename}_annotated.mp4")
+
+             # Process the video using the detector
+             detection_result = self.detector.predict_video(video_path, save_path=output_path)
+
+             if not detection_result.get('success', False):
+                 result['error'] = detection_result.get('error', 'Video processing failed')
+                 return result
+
+             result['success'] = True
+             result['output_path'] = detection_result['output_path']
+             result['total_frames'] = detection_result['total_frames']
+             result['detections_per_frame'] = detection_result['detections_per_frame']
+             result['processing_time'] = time.time() - start_time
+
+         except Exception as e:
+             result['error'] = f"Pipeline error: {str(e)}"
+             result['processing_time'] = time.time() - start_time
+             print(f"Multi-car video pipeline error: {e}")
+
+         return result
+
+     def get_detection_summary(self, detections_per_frame: list) -> Dict:
+         """
+         Generate a summary of detections across all frames.
+
+         Args:
+             detections_per_frame: List of detections per frame
+
+         Returns:
+             Dictionary with detection statistics
+         """
+         total_detections = 0
+         class_counts = {}
+         max_detections_per_frame = 0
+
+         for frame_detections in detections_per_frame:
+             frame_count = len(frame_detections)
+             total_detections += frame_count
+             max_detections_per_frame = max(max_detections_per_frame, frame_count)
+
+             for detection in frame_detections:
+                 class_name = detection.get('class_name', 'unknown')
+                 class_counts[class_name] = class_counts.get(class_name, 0) + 1
+
+         return {
+             'total_detections': total_detections,
+             'total_frames': len(detections_per_frame),
+             'average_detections_per_frame': total_detections / len(detections_per_frame) if detections_per_frame else 0,
+             'max_detections_per_frame': max_detections_per_frame,
+             'class_counts': class_counts
+         }
+
+
+ # Global pipeline instance
+ _multi_car_pipeline = None
+
+
+ def get_multi_car_pipeline(confidence_threshold: float = 0.25) -> MultiCarVideoPipeline:
+     """Get or create the global multi-car video pipeline instance."""
+     global _multi_car_pipeline
+     if _multi_car_pipeline is None:
+         _multi_car_pipeline = MultiCarVideoPipeline(confidence_threshold=confidence_threshold)
+     return _multi_car_pipeline
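
End-to-end, the pipeline is meant to be used roughly like this (a sketch; the input path is a placeholder). Note that `get_multi_car_pipeline` caches a module-level singleton, so `confidence_threshold` only takes effect on the first call:

```python
# Sketch of end-to-end pipeline usage; the input path below is a placeholder.
from app.services.multi_car_pipeline import get_multi_car_pipeline

pipeline = get_multi_car_pipeline(confidence_threshold=0.25)
result = pipeline.process_video("datasets/multi-car/sample.mp4")

if result['success']:
    summary = pipeline.get_detection_summary(result['detections_per_frame'])
    print(f"{result['total_frames']} frames, "
          f"{summary['total_detections']} detections, "
          f"max {summary['max_detections_per_frame']} in a single frame")
else:
    print("Processing failed:", result['error'])
```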
app/utils/config.py CHANGED
@@ -14,6 +14,7 @@ WORD_DETECTION_MODEL = "Safe-Drive-TN/tunis-word-detection-yolov8s"
  OCR_MODEL = "microsoft/trocr-base-printed"

  STATE_FARM_MODEL = "Safe-Drive-TN/State-farm-detection"
+ MULTI_CAR_DETECTION_MODEL = "Safe-Drive-TN/Multiple-Car-Detection"

  # HuggingFace Token
  HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
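
Since `load_model` passes `HF_TOKEN` to `hf_hub_download`, the token only matters if the model repo is private or gated. A quick sanity check of the new entry (a sketch, not part of the commit):

```python
# Sketch: confirm the new config entry resolves and whether a token is present.
# A token is only required if Safe-Drive-TN/Multiple-Car-Detection is private/gated.
from app.utils.config import MULTI_CAR_DETECTION_MODEL, HF_TOKEN

print("Multi-car model repo:", MULTI_CAR_DETECTION_MODEL)
print("HF token configured:", HF_TOKEN is not None)
```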