
Commit f48ad67

Fixed Hugging Face progress bar issue
1 parent e8176cf commit f48ad67

File tree

6 files changed: +306 -53 lines changed


CHANGELOG.md

Lines changed: 21 additions & 0 deletions
@@ -2,6 +2,27 @@
 
 All notable changes to LocalLab will be documented in this file.
 
+## [0.5.7] - 2024-05-01
+
+### Improved
+
+- Redesigned all UI banners with modern, aesthetic styling
+- Enhanced INITIALIZING and RUNNING banners with box-style borders and improved spacing
+- Redesigned ngrok tunnel banner with a modern box layout and better visual hierarchy
+- Added informative notes to the ngrok banner for better user guidance
+- Improved overall visual consistency and readability across all UI elements
+- Enhanced color scheme for better visual appeal and readability
+
+## [0.5.6] - 2024-05-01
+
+### Fixed
+
+- Fixed model download progress bars to display sequentially instead of interleaved
+- Implemented custom progress bar handler for HuggingFace Hub downloads
+- Added proper synchronization for multiple concurrent download progress bars
+- Enhanced logging during model downloads for better readability
+- Improved visual clarity of download progress information
+
 ## [0.5.5] - 2024-04-30
 
 ### Fixed

locallab/model_manager.py

Lines changed: 21 additions & 1 deletion
@@ -13,6 +13,7 @@
 )
 from .logger.logger import logger, log_model_loaded, log_model_unloaded
 from .utils import check_resource_availability, get_device, format_model_size
+from .utils.progress import configure_hf_hub_progress
 import gc
 from colorama import Fore, Style
 import asyncio
@@ -21,6 +22,9 @@
 import tempfile
 import json
 
+# Configure HuggingFace Hub progress bars
+configure_hf_hub_progress()
+
 QUANTIZATION_SETTINGS = {
     "fp16": {
         "load_in_8bit": False,
@@ -257,24 +261,33 @@ async def _load_model_with_optimizations(self, model_id: str):
         # Apply quantization settings
         quant_config = self._get_quantization_config()
 
+        # Log model loading start
+        logger.info(f"Starting download and loading of model: {model_id}")
+
         # Load tokenizer first
+        logger.info(f"Loading tokenizer for {model_id}...")
         self.tokenizer = AutoTokenizer.from_pretrained(
             model_id,
             token=hf_token if hf_token else None
         )
+        logger.info(f"Tokenizer loaded successfully")
 
         # Load model with optimizations
+        logger.info(f"Loading model weights for {model_id}...")
         self.model = AutoModelForCausalLM.from_pretrained(
             model_id,
             token=hf_token if hf_token else None,
             **quant_config
         )
+        logger.info(f"Model weights loaded successfully")
 
         # Apply additional optimizations
+        logger.info(f"Applying optimizations to model...")
         self.model = self._apply_optimizations(self.model)
 
         # Set model to evaluation mode
         self.model.eval()
+        logger.info(f"Model ready for inference")
 
         return self.model
 
@@ -347,7 +360,7 @@ async def generate(
 
         if not self.model or not self.tokenizer:
             raise HTTPException(
-                status_code=400,
+                status_code=400,
                 detail="No model is currently loaded. Please load a model first using the /models/load endpoint."
             )
 
@@ -969,13 +982,20 @@ async def load_custom_model(self, model_name: str, fallback_model: Optional[str]
                 llm_int8_threshold=6.0
             )
 
+        # Load tokenizer first
+        logger.info(f"Loading tokenizer for custom model {model_name}...")
         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+        logger.info(f"Tokenizer loaded successfully")
+
+        # Load model with optimizations
+        logger.info(f"Loading model weights for custom model {model_name}...")
         self.model = AutoModelForCausalLM.from_pretrained(
             model_name,
            torch_dtype=torch.float16,
            device_map="auto",
            quantization_config=quant_config
         )
+        logger.info(f"Model weights loaded successfully")
 
         self.model = self._apply_optimizations(self.model)
 
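Note: the configure_hf_hub_progress() helper called above lives in locallab/utils/progress.py, the sixth file in this commit, which is not shown in this view. As a rough sketch of the technique the changelog describes — serializing concurrent HuggingFace Hub download bars so they render one after another — a handler along the following lines would work; the class name, the lock-per-bar approach, and the patched attribute are assumptions, not the actual implementation.

import threading

from tqdm import tqdm


class SequentialTqdm(tqdm):
    """A tqdm bar that holds a shared lock for its whole lifetime, so
    overlapping download bars print one after another instead of
    interleaving (note this also serializes the downloads themselves)."""

    _render_lock = threading.Lock()

    def __init__(self, *args, **kwargs):
        SequentialTqdm._render_lock.acquire()
        self._lock_held = True
        try:
            super().__init__(*args, **kwargs)
        except Exception:
            self._release()
            raise

    def _release(self):
        if getattr(self, "_lock_held", False):
            self._lock_held = False
            SequentialTqdm._render_lock.release()

    def close(self):
        try:
            super().close()
        finally:
            self._release()


def configure_hf_hub_progress() -> None:
    """Route huggingface_hub download bars through SequentialTqdm.

    huggingface_hub documents no hook for swapping its tqdm class, so
    patching file_download.tqdm is an assumption that may need adjusting
    for the installed version of the library.
    """
    try:
        from huggingface_hub import file_download
        file_download.tqdm = SequentialTqdm  # assumed internal attribute
    except (ImportError, AttributeError):
        pass  # leave the default (possibly interleaved) bars in place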

locallab/ui/banners.py

Lines changed: 84 additions & 35 deletions
@@ -13,34 +13,56 @@ def print_initializing_banner(version: str = "0.4.25"):
     Print the initializing banner with clear visual indication
     that the server is starting up and not ready for requests
     """
-    startup_banner = f"""
-{Fore.CYAN}════════════════════════════════════════════════════════════════════════{Style.RESET_ALL}
+    # Calculate banner width
+    banner_width = 80
 
-{Fore.GREEN}LocalLab Server v{version}{Style.RESET_ALL}
-{Fore.CYAN}Your lightweight AI inference server for running LLMs locally{Style.RESET_ALL}
+    # Create horizontal lines with modern styling
+    h_line = f"{Fore.CYAN}{'═' * banner_width}{Style.RESET_ALL}"
 
-{Fore.BLUE}
+    # Create the LocalLab ASCII art with improved spacing and color
+    locallab_ascii = f"""{Fore.BLUE}
 ██╗ ██████╗ ██████╗ █████╗ ██╗ ██╗ █████╗ ██████╗
 ██║ ██╔═══██╗██╔════╝██╔══██╗██║ ██║ ██╔══██╗██╔══██╗
 ██║ ██║ ██║██║ ███████║██║ ██║ ███████║██████╔╝
 ██║ ██║ ██║██║ ██╔══██║██║ ██║ ██╔══██║██╔══██╗
 ███████╗╚██████╔╝╚██████╗██║ ██║███████╗███████╗██║ ██║██████╔╝
-╚══════╝ ╚═════╝ ╚═════╝╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝╚═════╝
-{Style.RESET_ALL}
+╚══════╝ ╚═════╝ ╚═════╝╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝╚═════╝ {Style.RESET_ALL}"""
+
+    # Create status box with modern styling
+    status_box_top = f"{Fore.YELLOW}{'━' * (banner_width - 2)}{Style.RESET_ALL}"
+    status_title = f"{Fore.YELLOW}{' ' * ((banner_width - 20) // 2)}⚠️ INITIALIZING ⚠️{' ' * ((banner_width - 20) // 2 + (banner_width - 20) % 2)}{Style.RESET_ALL}"
+    status_empty = f"{Fore.YELLOW}{' ' * (banner_width - 2)}{Style.RESET_ALL}"
+    status_bullet1 = f"{Fore.YELLOW}┃ • {Fore.WHITE}Server is starting up - please wait{' ' * (banner_width - 41)}{Style.RESET_ALL}"
+    status_bullet2 = f"{Fore.YELLOW}┃ • {Fore.WHITE}Do not make API requests yet{' ' * (banner_width - 36)}{Style.RESET_ALL}"
+    status_bullet3 = f"{Fore.YELLOW}┃ • {Fore.WHITE}Wait for the \"RUNNING\" banner to appear{' ' * (banner_width - 48)}{Style.RESET_ALL}"
+    status_box_bottom = f"{Fore.YELLOW}{'━' * (banner_width - 2)}{Style.RESET_ALL}"
+
+    # Create status indicator with modern styling
+    status_indicator = f"⏳ Status: {Fore.YELLOW}INITIALIZING{Style.RESET_ALL}"
+    loading_indicator = f"🔄 Loading components and checking environment..."
+
+    # Assemble the complete banner
+    startup_banner = f"""
+{h_line}
+
+{Fore.GREEN}LocalLab Server v{version}{Style.RESET_ALL}
+{Fore.CYAN}Your lightweight AI inference server for running LLMs locally{Style.RESET_ALL}
 
-{Fore.YELLOW}▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄
-⚠️ INITIALIZING ⚠️
+{locallab_ascii}
 
-• Server is starting up - please wait
-• Do not make API requests yet
-• Wait for the "RUNNING" banner to appear
-▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
-{Style.RESET_ALL}
+{status_box_top}
+{status_title}
+{status_empty}
+{status_bullet1}
+{status_bullet2}
+{status_bullet3}
+{status_empty}
+{status_box_bottom}
 
-{Fore.CYAN}════════════════════════════════════════════════════════════════════════{Style.RESET_ALL}
+{h_line}
 
-⏳ Status: {Fore.YELLOW}INITIALIZING{Style.RESET_ALL}
-🔄 Loading components and checking environment...
+{status_indicator}
+{loading_indicator}
 
 """
     print(startup_banner, flush=True)
@@ -52,33 +74,60 @@ def print_running_banner(version: str):
     that the server is now ready to accept API requests
     """
    try:
-        running_banner = f"""
-{Fore.CYAN}════════════════════════════════════════════════════════════════════════{Style.RESET_ALL}
+        # Calculate banner width
+        banner_width = 80
 
-{Fore.GREEN}LocalLab Server v{version}{Style.RESET_ALL} - {Fore.YELLOW}READY FOR REQUESTS{Style.RESET_ALL}
-{Fore.CYAN}Your AI model is now running and ready to process requests{Style.RESET_ALL}
+        # Create horizontal lines with modern styling
+        h_line = f"{Fore.CYAN}{'═' * banner_width}{Style.RESET_ALL}"
 
-{Fore.GREEN}
+        # Create the LocalLab ASCII art with improved spacing and color
+        locallab_ascii = f"""{Fore.GREEN}
 ██╗ ██████╗ ██████╗ █████╗ ██╗ ██╗ █████╗ ██████╗
 ██║ ██╔═══██╗██╔════╝██╔══██╗██║ ██║ ██╔══██╗██╔══██╗
 ██║ ██║ ██║██║ ███████║██║ ██║ ███████║██████╔╝
 ██║ ██║ ██║██║ ██╔══██║██║ ██║ ██╔══██║██╔══██╗
 ███████╗╚██████╔╝╚██████╗██║ ██║███████╗███████╗██║ ██║██████╔╝
-╚══════╝ ╚═════╝ ╚═════╝╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝╚═════╝
-{Style.RESET_ALL}
-{Fore.GREEN}▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄
-✅ RUNNING ✅
-
-• Server is ready - you can now make API requests
-• Prefer to use the client packages for easier interaction
-• Model loading will continue in the background
-• API documentation is available below
-▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
-{Style.RESET_ALL}
+╚══════╝ ╚═════╝ ╚═════╝╚═╝ ╚═╝╚══════╝╚══════╝╚═╝ ╚═╝╚═════╝ {Style.RESET_ALL}"""
+
+        # Create status box with modern styling
+        status_box_top = f"{Fore.GREEN}{'━' * (banner_width - 2)}{Style.RESET_ALL}"
+        status_title = f"{Fore.GREEN}{' ' * ((banner_width - 16) // 2)}✅ RUNNING ✅{' ' * ((banner_width - 16) // 2 + (banner_width - 16) % 2)}{Style.RESET_ALL}"
+        status_empty = f"{Fore.GREEN}{' ' * (banner_width - 2)}{Style.RESET_ALL}"
+        status_bullet1 = f"{Fore.GREEN}┃ • {Fore.WHITE}Server is ready - you can now make API requests{' ' * (banner_width - 53)}{Style.RESET_ALL}"
+        status_bullet2 = f"{Fore.GREEN}┃ • {Fore.WHITE}Prefer to use the client packages for easier interaction{' ' * (banner_width - 65)}{Style.RESET_ALL}"
+        status_bullet3 = f"{Fore.GREEN}┃ • {Fore.WHITE}Model loading will continue in the background{' ' * (banner_width - 52)}{Style.RESET_ALL}"
+        status_bullet4 = f"{Fore.GREEN}┃ • {Fore.WHITE}API documentation is available below{' ' * (banner_width - 45)}{Style.RESET_ALL}"
+        status_box_bottom = f"{Fore.GREEN}{'━' * (banner_width - 2)}{Style.RESET_ALL}"
+
+        # Create status indicator with modern styling
+        status_indicator = f"🚀 Status: {Fore.GREEN}RUNNING{Style.RESET_ALL}"
+        ready_indicator = f"✨ Your AI model is now running and ready to process requests"
+
+        # Assemble the complete banner
+        running_banner = f"""
+{h_line}
 
-{Fore.CYAN}════════════════════════════════════════════════════════════════════════{Style.RESET_ALL}
-"""
+{Fore.GREEN}LocalLab Server v{version}{Style.RESET_ALL} - {Fore.YELLOW}READY FOR REQUESTS{Style.RESET_ALL}
+{Fore.CYAN}Your AI model is now running and ready to process requests{Style.RESET_ALL}
 
+{locallab_ascii}
+
+{status_box_top}
+{status_title}
+{status_empty}
+{status_bullet1}
+{status_bullet2}
+{status_bullet3}
+{status_bullet4}
+{status_empty}
+{status_box_bottom}
+
+{h_line}
+
+{status_indicator}
+{ready_indicator}
+
+"""
         # Make sure we flush the output to ensure it appears
         print(running_banner, flush=True)
 
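Note: the new banners pad every visible line to a fixed banner_width by hand, which is where the repeated // 2 and % 2 terms above come from: the leftover space is split evenly, with the odd column going to the right. A standalone illustration of that arithmetic (the helper name is hypothetical; the commit inlines the expressions in f-strings):

def center_in_banner(text: str, banner_width: int = 80) -> str:
    """Pad text with spaces so it occupies exactly banner_width columns."""
    remaining = banner_width - len(text)
    left = remaining // 2
    right = remaining // 2 + remaining % 2  # odd leftover column goes right
    return " " * left + text + " " * right


assert len(center_in_banner("RUNNING", 80)) == 80

The title lines in the diff subtract hand-picked constants (banner_width - 20, banner_width - 16) rather than len(title), presumably because emoji such as ⚠️ and ✅ render two terminal columns wide while len() counts code points, so tuned constants keep the box edges visually aligned.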

locallab/utils/__init__.py

Lines changed: 7 additions & 3 deletions
@@ -12,17 +12,21 @@
     format_model_size,
     get_system_resources
 )
+from .progress import configure_hf_hub_progress
 
 __all__ = [
     # Networking utilities
     'is_port_in_use',
     'setup_ngrok',
-
+
     # System utilities
     'get_system_memory',
     'get_gpu_memory',
     'check_resource_availability',
     'get_device',
     'format_model_size',
-    'get_system_resources'
-]
+    'get_system_resources',
+
+    # Progress utilities
+    'configure_hf_hub_progress'
+]

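With the new export in __all__, the helper can be imported from the package root as well as from its submodule (the path model_manager.py uses above):

from locallab.utils import configure_hf_hub_progress
# equivalent: from locallab.utils.progress import configure_hf_hub_progress

configure_hf_hub_progress()  # install the sequential HF Hub progress bars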
locallab/utils/networking.py

Lines changed: 29 additions & 14 deletions
@@ -51,26 +51,41 @@ def setup_ngrok(port: int) -> Optional[str]:
 
         # Calculate banner width based on URL length (minimum 80 characters)
         url_length = len(public_url)
-        banner_width = max(80, url_length + 20)  # Add padding for aesthetics
+        banner_width = max(80, url_length + 30)  # Add padding for aesthetics
 
-        # Create dynamic width horizontal lines with thicker borders
-        h_line_top = "▄" * banner_width
-        h_line_bottom = "▀" * banner_width
+        # Create modern box-style banner with rounded corners
+        box_top = f"{Fore.CYAN}{'─' * (banner_width - 2)}{Style.RESET_ALL}"
+        box_bottom = f"{Fore.CYAN}{'─' * (banner_width - 2)}{Style.RESET_ALL}"
 
-        # Create centered title
+        # Create empty line for spacing
+        empty_line = f"{Fore.CYAN}{' ' * (banner_width - 2)}{Style.RESET_ALL}"
+
+        # Create centered title with sparkles
         title = "✨ NGROK TUNNEL ACTIVE ✨"
         title_padding = (banner_width - len(title)) // 2
-        padded_title = " " * title_padding + title + " " * title_padding
-        # Adjust if odd number
-        if len(padded_title) < banner_width:
-            padded_title += " "
+        title_line = f"{Fore.CYAN}{' ' * title_padding}{Fore.MAGENTA}{title}{Fore.CYAN}{' ' * (banner_width - 2 - len(title) - title_padding)}{Style.RESET_ALL}"
+
+        # Create URL line with proper padding
+        url_label = "Public URL: "
+        url_padding_left = 4  # Left padding for aesthetics
+        url_line = f"{Fore.CYAN}{' ' * url_padding_left}{Fore.GREEN}{url_label}{Fore.YELLOW}{public_url}{' ' * (banner_width - 2 - len(url_label) - len(public_url) - url_padding_left)}{Fore.CYAN}{Style.RESET_ALL}"
+
+        # Create note line
+        note = "🔗 Your server is now accessible from anywhere via this URL"
+        note_padding = (banner_width - len(note)) // 2
+        note_line = f"{Fore.CYAN}{' ' * note_padding}{Fore.WHITE}{note}{Fore.CYAN}{' ' * (banner_width - 2 - len(note) - note_padding)}{Style.RESET_ALL}"
 
-        # Display banner with no side borders and prominent top/bottom borders
+        # Display modern box-style banner
         logger.info(f"""
-{Fore.CYAN}{h_line_top}{Style.RESET_ALL}
-{Fore.CYAN}{padded_title}{Style.RESET_ALL}
-{Fore.GREEN}Public URL: {Fore.YELLOW}{public_url}{Style.RESET_ALL}
-{Fore.CYAN}{h_line_bottom}{Style.RESET_ALL}
+{box_top}
+{empty_line}
+{title_line}
+{empty_line}
+{url_line}
+{empty_line}
+{note_line}
+{empty_line}
+{box_bottom}
 """)
         return public_url
 
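Note: the ngrok banner sizes itself from the URL — banner_width = max(80, len(public_url) + 30) — and every visible line is then padded out to banner_width - 2 columns, the width used for box_top and box_bottom. The colorama codes add no printable width, which is why only the visible text appears in the padding arithmetic above. A minimal standalone version of the URL-line calculation (colors omitted; the URL below is just an example):

def build_url_line(public_url: str) -> str:
    """Reproduce the padding arithmetic of url_line above, without colors."""
    banner_width = max(80, len(public_url) + 30)  # same rule as the diff
    url_label = "Public URL: "
    url_padding_left = 4
    right_pad = banner_width - 2 - len(url_label) - len(public_url) - url_padding_left
    return " " * url_padding_left + url_label + public_url + " " * right_pad


line = build_url_line("https://example-1234.ngrok-free.app")  # example URL
assert len(line) == 78  # banner_width - 2, matching box_top and box_bottom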
