5 files changed: +27 −415 lines changed
@@ -2,7 +2,7 @@
 LocalLab - A lightweight AI inference server for running LLMs locally
 """
 
-__version__ = "0.4.40"
+__version__ = "0.4.41"
 
 # Only import what's necessary initially, lazy-load the rest
 from .logger import get_logger
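The context lines above note that the package only imports what it needs up front and lazy-loads the rest. A minimal sketch of one common way to do that, a module-level __getattr__ as described in PEP 562, is shown below; the submodule names used here are assumptions for illustration, and LocalLab's actual lazy-loading mechanism is not part of this diff.

# Sketch of lazy submodule loading via PEP 562 (assumed pattern, not LocalLab's code).
# Only the eager get_logger import appears in the diff above; the submodule
# names "server" and "model_manager" are hypothetical.
import importlib

from .logger import get_logger  # cheap import, loaded eagerly

_LAZY_SUBMODULES = {"server", "model_manager"}

def __getattr__(name):
    if name in _LAZY_SUBMODULES:
        module = importlib.import_module(f".{name}", __name__)
        globals()[name] = module  # cache so __getattr__ runs once per name
        return module
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")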
@@ -47,7 +47,18 @@ def __init__(self):
         self.tokenizer = None
         self.current_model = None
         self._loading = False
-        self._last_use = time.time()
+        self._last_use = time.time()  # Initialize _last_use
+        self.response_cache = {}  # Add cache dictionary
+
+    @property
+    def last_used(self) -> float:
+        """Get the timestamp of last model use"""
+        return self._last_use
+
+    @last_used.setter
+    def last_used(self, value: float):
+        """Set the timestamp of last model use"""
+        self._last_use = value
 
     def _get_quantization_config(self) -> Optional[Dict[str, Any]]:
         """Get quantization configuration based on settings"""