akhaliq (HF Staff) committed
Commit 04093c9 · 1 Parent(s): 4868ca9

add minimax m2.1

backend_api.py CHANGED
@@ -98,8 +98,9 @@ def get_cached_client(model_id: str, provider: str = "auto"):
 
 # Define models and languages here to avoid importing Gradio UI
 AVAILABLE_MODELS = [
+    {"name": "MiniMax M2.1", "id": "MiniMaxAI/MiniMax-M2.1", "description": "MiniMax M2.1 - Enhanced model via HuggingFace Router (Default)", "supports_images": False},
     {"name": "GLM-4.7", "id": "zai-org/GLM-4.7", "description": "GLM-4.7 - Latest GLM model via HuggingFace Router with Novita provider", "supports_images": False},
-    {"name": "GLM-4.6", "id": "zai-org/GLM-4.6", "description": "GLM-4.6 model via HuggingFace with Cerebras provider (Default)", "supports_images": False},
+    {"name": "GLM-4.6", "id": "zai-org/GLM-4.6", "description": "GLM-4.6 model via HuggingFace with Cerebras provider", "supports_images": False},
     {"name": "GLM-4.6V 👁️", "id": "zai-org/GLM-4.6V:zai-org", "description": "GLM-4.6V vision model - supports image uploads for visual understanding", "supports_images": True},
     {"name": "DeepSeek V3", "id": "deepseek-ai/DeepSeek-V3", "description": "DeepSeek V3 - Fast model for code generation via HuggingFace Router with Novita provider", "supports_images": False},
     {"name": "DeepSeek R1", "id": "deepseek-ai/DeepSeek-R1", "description": "DeepSeek R1 model for code generation via HuggingFace", "supports_images": False},
@@ -191,7 +192,7 @@ async def startup_event():
 class CodeGenerationRequest(BaseModel):
     query: str
     language: str = "html"
-    model_id: str = "zai-org/GLM-4.6"
+    model_id: str = "MiniMaxAI/MiniMax-M2.1"
     provider: str = "auto"
     history: List[List[str]] = []
     agent_mode: bool = False
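
A minimal sketch of what the new backend_api.py default means in practice. It only re-declares the fields shown in the diff above and assumes BaseModel is pydantic's (typical for a FastAPI backend); the request construction at the bottom is illustrative, not from the repo.

# Sketch only - mirrors the fields from the diff above; assumes pydantic's BaseModel.
from typing import List
from pydantic import BaseModel

class CodeGenerationRequest(BaseModel):
    query: str
    language: str = "html"
    model_id: str = "MiniMaxAI/MiniMax-M2.1"  # new default introduced by this commit
    provider: str = "auto"
    history: List[List[str]] = []
    agent_mode: bool = False

# A request that omits model_id now resolves to MiniMax M2.1 instead of GLM-4.6.
req = CodeGenerationRequest(query="Build a landing page")
print(req.model_id)  # MiniMaxAI/MiniMax-M2.1
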
backend_models.py CHANGED
@@ -13,8 +13,8 @@ def get_inference_client(model_id: str, provider: str = "auto"):
 
     Returns OpenAI-compatible client for all models or raises error if not configured.
     """
-    if model_id == "MiniMaxAI/MiniMax-M2":
-        # Use HuggingFace Router with Novita provider for MiniMax M2 model
+    if model_id == "MiniMaxAI/MiniMax-M2" or model_id == "MiniMaxAI/MiniMax-M2.1":
+        # Use HuggingFace Router with Novita provider for MiniMax M2 models
         return OpenAI(
             base_url="https://router.huggingface.co/v1",
             api_key=os.getenv("HF_TOKEN"),
@@ -75,9 +75,9 @@ def get_real_model_id(model_id: str) -> str:
         # GLM-4.6 requires Cerebras provider suffix in model string for API calls
         return "zai-org/GLM-4.6:cerebras"
 
-    elif model_id == "MiniMaxAI/MiniMax-M2":
-        # MiniMax M2 needs Novita provider suffix
-        return "MiniMaxAI/MiniMax-M2:novita"
+    elif model_id == "MiniMaxAI/MiniMax-M2" or model_id == "MiniMaxAI/MiniMax-M2.1":
+        # MiniMax M2 and M2.1 need Novita provider suffix
+        return f"{model_id}:novita"
 
     elif model_id == "moonshotai/Kimi-K2-Thinking":
         # Kimi K2 Thinking needs Together AI provider
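
A minimal usage sketch of how the two helpers changed above fit together, assuming HF_TOKEN is set and the openai package is installed. The get_real_model_id body here is reduced to just the branch touched by this commit (the real helper covers more models); the chat call itself is illustrative.

import os
from openai import OpenAI  # OpenAI-compatible client, as used in backend_models.py

def get_real_model_id(model_id: str) -> str:
    # Only the branch added in this commit; other models are handled elsewhere.
    if model_id == "MiniMaxAI/MiniMax-M2" or model_id == "MiniMaxAI/MiniMax-M2.1":
        return f"{model_id}:novita"
    return model_id

# MiniMax M2.1 goes through the HuggingFace Router with the Novita provider suffix.
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=os.getenv("HF_TOKEN"),
)

response = client.chat.completions.create(
    model=get_real_model_id("MiniMaxAI/MiniMax-M2.1"),  # -> "MiniMaxAI/MiniMax-M2.1:novita"
    messages=[{"role": "user", "content": "Write a minimal hello-world HTML page."}],
)
print(response.choices[0].message.content)
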
frontend/src/app/page.tsx CHANGED
@@ -17,7 +17,7 @@ export default function Home() {
 
   const [generatedCode, setGeneratedCode] = useState('');
   const [selectedLanguage, setSelectedLanguage] = useState<Language>('html');
-  const [selectedModel, setSelectedModel] = useState('zai-org/GLM-4.6');
+  const [selectedModel, setSelectedModel] = useState('MiniMaxAI/MiniMax-M2.1');
   const [models, setModels] = useState<Model[]>([]);
   const [isGenerating, setIsGenerating] = useState(false);
   const [isAuthenticated, setIsAuthenticated] = useState(false);
frontend/src/components/LandingPage.tsx CHANGED
@@ -31,7 +31,7 @@ export default function LandingPage({
   onImport,
   isAuthenticated,
   initialLanguage = 'html',
-  initialModel = 'zai-org/GLM-4.7',
+  initialModel = 'MiniMaxAI/MiniMax-M2.1',
   onAuthChange,
   setPendingPR,
   pendingPRRef
@@ -513,8 +513,8 @@ ${isGradio ? '\n\nIMPORTANT: Only output app.py with the redesigned UI (themes,
       if (onStart) {
         // Pass duplicated space ID so auto-deploy updates it
         console.log('[Redesign] Calling onStart with duplicated repo ID:', duplicatedRepoId);
-        console.log('[Redesign] Using GLM-4.7 for redesign');
-        onStart(redesignPrompt, result.language || 'html', 'zai-org/GLM-4.7', undefined, duplicatedRepoId);
+        console.log('[Redesign] Using MiniMax M2.1 for redesign');
+        onStart(redesignPrompt, result.language || 'html', 'MiniMaxAI/MiniMax-M2.1', undefined, duplicatedRepoId);
       }
     }, 100);
 
@@ -558,8 +558,8 @@ Note: After generating the redesign, I will create a Pull Request on the origina
 
       if (onStart) {
         console.log('[Redesign] Will create PR - not passing repo ID');
-        console.log('[Redesign] Using GLM-4.7 for redesign');
-        onStart(redesignPrompt, result.language || 'html', 'zai-org/GLM-4.7', undefined, repoId, true); // Pass true for shouldCreatePR
+        console.log('[Redesign] Using MiniMax M2.1 for redesign');
+        onStart(redesignPrompt, result.language || 'html', 'MiniMaxAI/MiniMax-M2.1', undefined, repoId, true); // Pass true for shouldCreatePR
       }
 
      console.log('[Redesign] Will create PR after code generation completes');