Skip to content

Added local-runner requirements validation step #712

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jul 22, 2025
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 15 additions & 44 deletions clarifai/cli/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,12 @@
import click

from clarifai.cli.base import cli, pat_display
from clarifai.utils.cli import validate_context
from clarifai.utils.cli import (
check_ollama_installed,
check_requirements_installed,
customize_ollama_model,
validate_context,
)
from clarifai.utils.constants import (
DEFAULT_LOCAL_RUNNER_APP_ID,
DEFAULT_LOCAL_RUNNER_COMPUTE_CLUSTER_CONFIG,
Expand All @@ -22,49 +27,6 @@
from clarifai.utils.misc import GitHubDownloader, clone_github_repo, format_github_repo_url


def customize_ollama_model(model_path, model_name, port, context_length):
    """Customize the cloned Ollama template's model.py in place.

    Performs literal text substitutions on <model_path>/1/model.py, swapping
    the template's hard-coded defaults for caller-supplied values. Each
    substitution is skipped when its argument is falsy, and silently has no
    effect if the template text no longer contains the expected literal.

    Args:
        model_path: Path to the cloned model directory (expects 1/model.py inside).
        model_name: The model name to set (e.g., 'llama3.1', 'mistral');
            replaces the template default 'llama3.2'.
        port: Port for the Ollama server; replaces the template default '23333'.
        context_length: Context length for the model; replaces the template
            default '8192'.

    Raises:
        Exception: Re-raises any error hit while reading or writing model.py
            (after logging it). A missing model.py only logs a warning.
    """
    model_py_path = os.path.join(model_path, "1", "model.py")

    if not os.path.exists(model_py_path):
        logger.warning(f"Model file {model_py_path} not found, skipping model name customization")
        return

    try:
        # Read the current template content.
        with open(model_py_path, 'r') as file:
            content = file.read()
        if model_name:
            # Replace the default model name in the load_model method
            content = content.replace(
                'self.model = os.environ.get("OLLAMA_MODEL_NAME", \'llama3.2\')',
                f'self.model = os.environ.get("OLLAMA_MODEL_NAME", \'{model_name}\')',
            )

        if port:
            # Replace the default port variable in the model.py file
            content = content.replace("PORT = '23333'", f"PORT = '{port}'")

        if context_length:
            # Replace the default context length variable in the model.py file
            content = content.replace(
                "context_length = '8192'", f"context_length = '{context_length}'"
            )

        # Write the modified content back to model.py
        with open(model_py_path, 'w') as file:
            file.write(content)

    except Exception as e:
        logger.error(f"Failed to customize Ollama model name in {model_py_path}: {e}")
        raise


@cli.group(
['model'], context_settings={'max_content_width': shutil.get_terminal_size().columns - 10}
)
Expand Down Expand Up @@ -164,6 +126,11 @@ def init(

# --toolkit option
if toolkit == 'ollama':
if not check_ollama_installed():
logger.error(
"Ollama is not installed. Please install it from `https://ollama.com/` to use the Ollama toolkit."
)
raise click.Abort()
github_url = DEFAULT_OLLAMA_MODEL_REPO
branch = DEFAULT_OLLAMA_MODEL_REPO_BRANCH

Expand Down Expand Up @@ -858,6 +825,10 @@ def local_runner(ctx, model_path, pool_size):
ModelBuilder._save_config(config_file, config)

builder = ModelBuilder(model_path, download_validation_only=True)
if not check_requirements_installed(model_path):
logger.error(f"Requirements not installed for model at {model_path}.")
raise click.Abort()

# don't mock for local runner since you need the dependencies to run the code anyways.
method_signatures = builder.get_method_signatures(mocking=False)

Expand Down
125 changes: 125 additions & 0 deletions clarifai/utils/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -220,3 +220,128 @@ def validate_context_auth(pat: str, user_id: str, api_base: str = None):
logger.error(f"❌ Validation failed: \n{error_msg}")
logger.error("Please check your credentials and try again.")
raise click.Abort() # Exit without saving the configuration


def customize_ollama_model(model_path, model_name, port, context_length):
    """Apply user-selected overrides to the cloned Ollama template.

    Rewrites <model_path>/1/model.py, replacing the template's default model
    name, server port and context length with the supplied values. A falsy
    argument leaves the corresponding default untouched; a literal that is
    absent from the file is silently skipped.

    Args:
        model_path: Path to the cloned model directory.
        model_name: Ollama model name to use (e.g., 'llama3.1', 'mistral').
        port: Port for the Ollama server.
        context_length: Context length for the model.

    Raises:
        Exception: Re-raised after logging if model.py cannot be read or
            written. A missing model.py only produces a warning.
    """
    target = os.path.join(model_path, "1", "model.py")

    if not os.path.exists(target):
        logger.warning(f"Model file {target} not found, skipping model name customization")
        return

    # Collect (template literal, replacement) pairs for the values provided.
    substitutions = []
    if model_name:
        substitutions.append(
            (
                'self.model = os.environ.get("OLLAMA_MODEL_NAME", \'llama3.2\')',
                f'self.model = os.environ.get("OLLAMA_MODEL_NAME", \'{model_name}\')',
            )
        )
    if port:
        substitutions.append(("PORT = '23333'", f"PORT = '{port}'"))
    if context_length:
        substitutions.append(
            ("context_length = '8192'", f"context_length = '{context_length}'")
        )

    try:
        with open(target, 'r') as fh:
            source = fh.read()
        for old, new in substitutions:
            source = source.replace(old, new)
        with open(target, 'w') as fh:
            fh.write(source)
    except Exception as e:
        logger.error(f"Failed to customize Ollama model name in {target}: {e}")
        raise


def check_ollama_installed():
    """Return True if the Ollama CLI is available on this machine.

    Probes by running `ollama --version`; a zero exit status means the CLI
    is installed and working. A missing executable (FileNotFoundError) is
    treated the same as a failing invocation.
    """
    import subprocess

    try:
        completed = subprocess.run(
            ['ollama', '--version'], capture_output=True, text=True, check=False
        )
    except FileNotFoundError:
        # Binary not on PATH at all.
        return False
    return completed.returncode == 0


def _is_package_installed(package_name):
    """Return True when a distribution named *package_name* is installed.

    Looks the name up via importlib.metadata. A missing distribution or any
    unexpected lookup error is logged and reported as "not installed".
    """
    import importlib.metadata

    try:
        importlib.metadata.distribution(package_name)
    except importlib.metadata.PackageNotFoundError:
        logger.debug(f"❌ {package_name} - not installed")
        return False
    except Exception as e:
        logger.warning(f"Error checking {package_name}: {e}")
        return False
    logger.debug(f"✅ {package_name} - installed")
    return True


def check_requirements_installed(model_path):
    """Check whether every dependency in the model's requirements.txt is installed.

    Reads <model_path>/requirements.txt, extracts the distribution name from
    each requirement line, and verifies each one is present in the current
    environment via ``_is_package_installed``.

    Args:
        model_path: Path to the model directory containing requirements.txt.

    Returns:
        True when all listed dependencies are installed, when there is no
        requirements.txt, or when the file lists nothing; False when at
        least one dependency is missing or the file cannot be parsed.
    """
    import re
    from pathlib import Path

    requirements_path = Path(model_path) / "requirements.txt"

    if not requirements_path.exists():
        logger.warning(f"requirements.txt not found at {requirements_path}")
        return True

    try:
        # Leading run of name characters; version specifiers and extras that
        # follow are ignored. '.' is included for dotted distribution names
        # (e.g. 'ruamel.yaml'). NOTE(review): names are not PEP 503-normalized
        # ('_' vs '-'), so unusual spellings may be misreported — confirm.
        package_pattern = re.compile(r'^([a-zA-Z0-9._-]+)')

        # (package name, full requirement line) pairs. Blank lines, comments,
        # and pip option lines ('-r other.txt', '-e .', '--index-url ...')
        # are skipped — previously an option line was mis-parsed as a package
        # named '-r'/'-e' and always reported missing.
        requirements = [
            (match.group(1), pack)
            for line in requirements_path.read_text().splitlines()
            if (pack := line.strip())
            and not pack.startswith(('#', '-'))
            and (match := package_pattern.match(pack))
        ]

        if not requirements:
            logger.info("No dependencies found in requirements.txt")
            return True

        logger.info(f"Checking {len(requirements)} dependencies...")

        missing = [
            full_req
            for package_name, full_req in requirements
            if not _is_package_installed(package_name)
        ]

        if not missing:
            logger.info(f"✅ All {len(requirements)} dependencies are installed!")
            return True

        # Report exactly which packages are absent and how to install them.
        logger.error(
            f"❌ {len(missing)} of {len(requirements)} required packages are "
            "missing in the current environment:"
        )
        logger.error("\n".join(f"  - {pkg}" for pkg in missing))
        logger.warning(f"To install: pip install -r {requirements_path}")
        return False

    except Exception as e:
        logger.error(f"Failed to check requirements: {e}")
        return False
Loading