Skip to content

Commit 4e7e57c

Browse files
committed
Improved Hugging Face Logs
1 parent e9109bf commit 4e7e57c

File tree

6 files changed

+136
-31
lines changed

6 files changed

+136
-31
lines changed

CHANGELOG.md

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,19 @@
22

33
All notable changes to LocalLab will be documented in this file.
44

5-
## [0.6.2] - 2024-05-03
5+
## [0.6.2] - 2024-05-04
6+
7+
### Improved
8+
9+
- Improved model downloading experience by using HuggingFace's native progress bars
10+
- Fixed interleaved progress bars issue during model downloads
11+
- Added clear success messages after model downloads
612

713
### Fixed
814

915
- Fixed CLI configuration issue where optimization settings shown as enabled by default weren't being properly saved
10-
- Updated default values for all optimization settings (quantization, flash attention, CPU offloading, better transformer) to be enabled by default
16+
- Updated default values for all optimization settings to be enabled by default
1117
- Ensured consistency between displayed optimization settings and saved configuration
12-
- Fixed resource check functions to use correct default values for optimization settings
1318

1419
## [0.6.1] - 2024-05-02
1520

locallab/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
LocalLab - A lightweight AI inference server for running LLMs locally
33
"""
44

5-
__version__ = "0.6.2" # Updated to fix CLI optimization settings defaults
5+
__version__ = "0.6.2" # Updated to improve model downloading experience and fix CLI settings
66

77
# Only import what's necessary initially, lazy-load the rest
88
from .logger import get_logger

locallab/logger/__init__.py

Lines changed: 28 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -109,13 +109,37 @@ class SubduedColoredFormatter(logging.Formatter):
109109
"""Formatter that adds subdued colors to regular logs and bright colors to important logs"""
110110

111111
def format(self, record):
112+
# Check if this is a HuggingFace Hub progress bar log
113+
# HuggingFace progress bars use tqdm which writes directly to stdout/stderr
114+
# We need to completely bypass our logger for these messages
115+
112116
# Check if we're currently downloading a model
113117
try:
114118
from ..utils.progress import is_model_downloading
115-
if is_model_downloading():
116-
# During model download, only show critical logs
117-
if record.levelno < logging.ERROR:
118-
# Skip non-critical logs during model download
119+
120+
# Check if this is a HuggingFace progress bar log
121+
is_hf_progress_log = False
122+
if hasattr(record, 'name') and isinstance(record.name, str):
123+
# HuggingFace Hub logs typically come from these modules
124+
hf_modules = ['huggingface_hub', 'filelock', 'transformers', 'tqdm']
125+
is_hf_progress_log = any(module in record.name for module in hf_modules)
126+
127+
# If we're downloading a model and this is a HuggingFace log, skip our formatting
128+
if is_model_downloading() and is_hf_progress_log:
129+
# Return empty string to skip this log in our logger
130+
# HuggingFace will handle displaying its own progress bars
131+
return ""
132+
133+
# For non-HuggingFace logs during model download, only show critical and model-related logs
134+
elif is_model_downloading() and record.levelno < logging.ERROR:
135+
# Check if this is a model-related log that should be shown
136+
is_model_log = False
137+
if hasattr(record, 'msg') and isinstance(record.msg, str):
138+
model_patterns = ['model', 'download', 'tokenizer', 'weight']
139+
is_model_log = any(pattern in record.msg.lower() for pattern in model_patterns)
140+
141+
# Skip non-critical and non-model logs during model download
142+
if not is_model_log:
119143
return ""
120144
except (ImportError, AttributeError):
121145
# If we can't import the function, continue as normal

locallab/model_manager.py

Lines changed: 51 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -264,7 +264,19 @@ async def _load_model_with_optimizations(self, model_id: str):
264264
# Log model loading start
265265
logger.info(f"Starting download and loading of model: {model_id}")
266266
print(f"\n{Fore.GREEN}Downloading model: {model_id}{Style.RESET_ALL}")
267-
print(f"{Fore.CYAN}This may take a while depending on your internet speed...{Style.RESET_ALL}\n")
267+
print(f"{Fore.CYAN}This may take a while depending on your internet speed...{Style.RESET_ALL}")
268+
# Add an empty line to separate from HuggingFace progress bars
269+
print("")
270+
271+
# Set a flag to indicate we're downloading a model
272+
# This will help our logger know to let HuggingFace's progress bars through
273+
try:
274+
# Access the module's global variable
275+
import locallab.utils.progress
276+
locallab.utils.progress.is_downloading = True
277+
except:
278+
# Fallback if import fails
279+
pass
268280

269281
# Load tokenizer first
270282
logger.info(f"Loading tokenizer for {model_id}...")
@@ -281,7 +293,19 @@ async def _load_model_with_optimizations(self, model_id: str):
281293
token=hf_token if hf_token else None,
282294
**quant_config
283295
)
284-
print(f"\n{Fore.GREEN}Model {model_id} downloaded successfully!{Style.RESET_ALL}")
296+
# Reset the downloading flag
297+
try:
298+
# Access the module's global variable
299+
import locallab.utils.progress
300+
locallab.utils.progress.is_downloading = False
301+
except:
302+
# Fallback if import fails
303+
pass
304+
305+
# Add an empty line and a clear success message
306+
print(f"\n{Fore.GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{Style.RESET_ALL}")
307+
print(f"{Fore.GREEN}✅ Model {model_id} downloaded successfully!{Style.RESET_ALL}")
308+
print(f"{Fore.GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{Style.RESET_ALL}")
285309
logger.info(f"Model weights loaded successfully")
286310

287311
# Apply additional optimizations
@@ -1045,7 +1069,18 @@ async def load_custom_model(self, model_name: str, fallback_model: Optional[str]
10451069
# Load tokenizer first
10461070
logger.info(f"Loading tokenizer for custom model {model_name}...")
10471071
print(f"\n{Fore.GREEN}Downloading custom model: {model_name}{Style.RESET_ALL}")
1048-
print(f"{Fore.CYAN}This may take a while depending on your internet speed...{Style.RESET_ALL}\n")
1072+
print(f"{Fore.CYAN}This may take a while depending on your internet speed...{Style.RESET_ALL}")
1073+
# Add an empty line to separate from HuggingFace progress bars
1074+
print("")
1075+
1076+
# Set a flag to indicate we're downloading a model
1077+
try:
1078+
# Access the module's global variable
1079+
import locallab.utils.progress
1080+
locallab.utils.progress.is_downloading = True
1081+
except:
1082+
# Fallback if import fails
1083+
pass
10491084

10501085
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
10511086
logger.info(f"Tokenizer loaded successfully")
@@ -1058,7 +1093,19 @@ async def load_custom_model(self, model_name: str, fallback_model: Optional[str]
10581093
device_map="auto",
10591094
quantization_config=quant_config
10601095
)
1061-
print(f"\n{Fore.GREEN}Custom model {model_name} downloaded successfully!{Style.RESET_ALL}")
1096+
# Reset the downloading flag
1097+
try:
1098+
# Access the module's global variable
1099+
import locallab.utils.progress
1100+
locallab.utils.progress.is_downloading = False
1101+
except:
1102+
# Fallback if import fails
1103+
pass
1104+
1105+
# Add an empty line and a clear success message
1106+
print(f"\n{Fore.GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{Style.RESET_ALL}")
1107+
print(f"{Fore.GREEN}✅ Custom model {model_name} downloaded successfully!{Style.RESET_ALL}")
1108+
print(f"{Fore.GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{Style.RESET_ALL}")
10621109
logger.info(f"Model weights loaded successfully")
10631110

10641111
self.model = self._apply_optimizations(self.model)

locallab/utils/progress.py

Lines changed: 47 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -155,32 +155,61 @@ def custom_progress_callback(
155155
pbar.close()
156156

157157
def configure_hf_hub_progress():
158-
"""Configure HuggingFace Hub to use our custom progress callback"""
158+
"""
159+
Configure HuggingFace Hub to use its native progress bars for model downloads.
160+
This completely bypasses our custom logger for HuggingFace download progress.
161+
"""
159162
try:
160-
from huggingface_hub import constants
163+
# 1. Enable HuggingFace's native progress bars
164+
from huggingface_hub.utils import logging as hf_logging
165+
hf_logging.enable_progress_bars()
161166

162-
# Disable HF Transfer (which uses a different progress reporting mechanism)
163-
constants.HF_HUB_ENABLE_HF_TRANSFER = False
167+
# 2. Enable HF Transfer for better download experience
168+
from huggingface_hub import constants
169+
constants.HF_HUB_ENABLE_HF_TRANSFER = True
164170

165-
# Configure the download progress callback
171+
# 3. Make sure we're NOT overriding HuggingFace's progress callback
172+
# This is critical - we want to use their native implementation
166173
from huggingface_hub import file_download
167-
file_download._tqdm_callback = custom_progress_callback
174+
if hasattr(file_download, "_tqdm_callback") and file_download._tqdm_callback == custom_progress_callback:
175+
# Reset to default if we previously set it to our custom callback
176+
file_download._tqdm_callback = None
168177

169-
# Disable default progress bars
170-
try:
171-
# Try to disable the default tqdm in huggingface_hub
172-
from huggingface_hub.utils import logging as hf_logging
173-
hf_logging.disable_progress_bars()
174-
except:
175-
pass
178+
# 4. Set a flag to indicate we're using HuggingFace's native progress bars
179+
global is_downloading
180+
is_downloading = True
176181

177-
logger.debug("Configured HuggingFace Hub to use custom progress callback")
182+
logger.debug("Configured HuggingFace Hub to use its native progress bars")
178183
except ImportError:
179-
logger.warning("Failed to configure HuggingFace Hub progress callback")
184+
logger.warning("Failed to configure HuggingFace Hub progress bars")
180185
except Exception as e:
181-
logger.warning(f"Error configuring HuggingFace Hub progress callback: {str(e)}")
186+
logger.warning(f"Error configuring HuggingFace Hub progress: {str(e)}")
182187

183188
# Function to check if we're currently downloading
184189
def is_model_downloading():
185-
"""Check if a model is currently being downloaded"""
186-
return is_downloading
190+
"""
191+
Check if a model is currently being downloaded.
192+
193+
This function now checks for active HuggingFace downloads by looking
194+
for tqdm progress bars in sys.stdout that contain model file patterns.
195+
"""
196+
# First check our global flag
197+
if is_downloading:
198+
return True
199+
200+
# Also check if there are any active HuggingFace downloads
201+
# by looking for specific patterns in the output
202+
try:
203+
# Check if there are any tqdm instances in sys.stdout
204+
if hasattr(sys.stdout, '_instances') and sys.stdout._instances:
205+
for instance in sys.stdout._instances:
206+
if hasattr(instance, 'desc') and isinstance(instance.desc, str):
207+
# Look for common model file patterns in the description
208+
if any(pattern in instance.desc.lower() for pattern in
209+
['model', 'weight', 'safetensors', 'bin', 'pytorch_model']):
210+
return True
211+
except:
212+
# If anything goes wrong with the check, default to False
213+
pass
214+
215+
return False

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@
4747

4848
setup(
4949
name="locallab",
50-
version="0.6.2",
50+
version="0.6.3",
5151
packages=find_packages(include=["locallab", "locallab.*"]),
5252
install_requires=install_requires,
5353
extras_require={

0 commit comments

Comments (0)