forked from NVIDIA-AI-Blueprints/vulnerability-analysis
-
Notifications
You must be signed in to change notification settings - Fork 8
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
executable file
·137 lines (125 loc) · 5.2 KB
/
docker-compose.yml
File metadata and controls
executable file
·137 lines (125 loc) · 5.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set project name environment variable to avoid container collision in shared environment
name: ${DOCKER_COMPOSE_PROJECT_NAME:-vuln_analysis}
services:
vuln-analysis:
image: vuln-analysis:2.0.0
init: true
build:
context: ./
dockerfile: ./Dockerfile
target: runtime
ports:
- "${WORKFLOW_HOST_JUPYTER_PORT:-8000}:8000"
- "${WORKFLOW_HOST_SCAN_PORT:-26466}:26466"
- "${WORKFLOW_HOST_PHOENIX_PORT:-6006}:6006"
working_dir: /workspace
networks:
- app_network
environment:
- TERM=${TERM:-}
- HF_HUB_CACHE=/workspace/.cache/huggingface
- XDG_CACHE_HOME=/workspace/.cache/am_cache # Allow logs to be written back to host
# Required API Keys
- NVD_API_KEY=${NVD_API_KEY:?"NVD_API_KEY is required"}
- NVIDIA_API_KEY=${NVIDIA_API_KEY:?"NVIDIA_API_KEY is required"}
- SERPAPI_API_KEY=${SERPAPI_API_KEY:?"SERPAPI_API_KEY is required"}
# Optional API Keys
- GHSA_API_KEY=${GHSA_API_KEY:-""}
- NGC_API_KEY=${NGC_API_KEY:-""}
- NGC_ORG_ID=${NGC_ORG_ID:-""}
- OPENAI_API_KEY=${OPENAI_API_KEY:-""}
# Base URLs for API endpoints
- CWE_DETAILS_BASE_URL=${CWE_DETAILS_BASE_URL:-http://nginx-cache/cwe-details}
- DEPSDEV_BASE_URL=${DEPSDEV_BASE_URL:-http://nginx-cache/depsdev}
- FIRST_BASE_URL=${FIRST_BASE_URL:-http://nginx-cache/first}
- GHSA_BASE_URL=${GHSA_BASE_URL:-http://nginx-cache/ghsa}
- NGC_API_BASE=${NGC_API_BASE:-http://nginx-cache/nemo/v1}
- NIM_EMBED_BASE_URL=${NIM_EMBED_BASE_URL:-http://nginx-cache/nim_embed/v1}
- NVD_BASE_URL=${NVD_BASE_URL:-http://nginx-cache/nvd}
- NVIDIA_API_BASE=${NVIDIA_API_BASE:-http://nginx-cache/nim_llm/v1}
- OPENAI_API_BASE=${OPENAI_API_BASE:-http://nginx-cache/openai/v1} # Used by `langchain` for embedding generation
- OPENAI_BASE_URL=${OPENAI_BASE_URL:-http://nginx-cache/openai/v1} # Used by `openai` for LLM inference
- RHSA_BASE_URL=${RHSA_BASE_URL:-http://nginx-cache/rhsa}
- SERPAPI_BASE_URL=${SERPAPI_BASE_URL:-http://nginx-cache/serpapi}
- UBUNTU_BASE_URL=${UBUNTU_BASE_URL:-http://nginx-cache/ubuntu}
# Model names
- CHECKLIST_MODEL_NAME=${CHECKLIST_MODEL_NAME:-meta/llama-3.1-70b-instruct}
- CODE_VDB_RETRIEVER_MODEL_NAME=${CODE_VDB_RETRIEVER_MODEL_NAME:-meta/llama-3.1-70b-instruct}
- DOC_VDB_RETRIEVER_MODEL_NAME=${DOC_VDB_RETRIEVER_MODEL_NAME:-meta/llama-3.1-70b-instruct}
- CVE_AGENT_EXECUTOR_MODEL_NAME=${CVE_AGENT_EXECUTOR_MODEL_NAME:-meta/llama-3.1-70b-instruct}
- GENERATE_CVSS_MODEL_NAME=${GENERATE_CVSS_MODEL_NAME:-meta/llama-3.1-70b-instruct}
- SUMMARIZE_MODEL_NAME=${SUMMARIZE_MODEL_NAME:-meta/llama-3.1-70b-instruct}
- JUSTIFY_MODEL_NAME=${JUSTIFY_MODEL_NAME:-meta/llama-3.1-70b-instruct}
- EMBEDDER_MODEL_NAME=${EMBEDDER_MODEL_NAME:-nvidia/nv-embedqa-e5-v5}
volumes:
- ./:/workspace
- /workspace/.venv
cap_add:
- sys_nice
depends_on:
- nginx-cache
restart: always
nginx-cache:
image: nginx
volumes:
- ./nginx/nginx_cache.conf:/etc/nginx/nginx.conf:ro
- ./nginx/logs:/var/log/nginx
- ./nginx/templates:/etc/nginx/templates:ro
- service-cache:/server_cache_intel:rw
- llm-cache:/server_cache_llm:rw
ports:
# Set custom ports in environment variables to avoid port collision
- "${NGINX_HOST_HTTP_PORT:-8080}:80"
environment:
# API Keys
- GHSA_API_KEY=${GHSA_API_KEY:-""}
- NGC_API_KEY=${NGC_API_KEY:-""}
- NGC_ORG_ID=${NGC_ORG_ID:-""}
- NVD_API_KEY=${NVD_API_KEY:-""}
- NVIDIA_API_KEY=${NVIDIA_API_KEY:-""}
- OPENAI_API_KEY=${OPENAI_API_KEY:-""}
# Route Variables
- NGINX_UPSTREAM_NVAI=${NGINX_UPSTREAM_NVAI:-https://api.nvcf.nvidia.com}
- NGINX_UPSTREAM_NIM_LLM=${NGINX_UPSTREAM_NIM_LLM:-https://integrate.api.nvidia.com}
- NGINX_UPSTREAM_NIM_EMBED=${NGINX_UPSTREAM_NIM_EMBED:-https://integrate.api.nvidia.com}
networks:
- app_network
restart: always
nginx-ssl:
image: nginx
volumes:
- ./nginx/nginx_ssl.conf:/etc/nginx/nginx.conf:ro
- ./nginx/key.pem:/etc/nginx/ssl/key.pem
- ./nginx/cert.pem:/etc/nginx/ssl/cert.pem
ports:
# Set custom ports in environment variables to avoid port collision
- "${NGINX_HOST_HTTPS_PORT:-443}:443"
networks:
- app_network
depends_on:
- nginx-cache
profiles:
- ssl
restart: always
# Single bridge network shared by all services so they can reach one another
# by service name (e.g. http://nginx-cache).
networks:
  app_network:
    driver: bridge
# Named volumes backing the nginx response caches; persisted across restarts.
volumes:
  service-cache:
    driver: local
  llm-cache:
    driver: local