
Commit 7f1aa89

Clean dashboard script to avoid regenerating figures
1 parent: ce16866

File tree

11 files changed: +27 additions, -25 deletions


content/dashboard/dashboard.py

Lines changed: 17 additions & 15 deletions
@@ -140,6 +140,7 @@ def make_plot(df, y_column, title, filename, y_label, x_min=None, x_max=None, y_
     html_file_path,
     include_plotlyjs='cdn',
     full_html=False,
+    div_id=filename,
     config={
         "displaylogo": False,
         "modeBarButtonsToRemove": [
@@ -159,36 +160,40 @@ def make_plot(df, y_column, title, filename, y_label, x_min=None, x_max=None, y_
 
     print(f"Successfully wrote HTML: {html_file_path}")
 
-
 # Fetch raw data from FRED
+# Fetch unemployment level
 u = fred.get_series("UNEMPLOY")
 if u is not None and not u.empty:
-    print(f"UNEMPLOY data fetched successfully. Shape: {u.shape}, Last date: {u.index.max()}")
+    print(f"UNEMPLOY data fetched successfully: shape = {u.shape}")
     u.index = pd.to_datetime(u.index)
     latest_u_date = u.index.max()
     latest_u_value = u.iloc[-1]
-    print(f"UNEMPLOY raw latest point: Date='{latest_u_date.strftime('%Y-%m-%d')}', Value='{latest_u_value}'")
+    previous_u_value = u.iloc[-2]
+    print(f"UNEMPLOY raw latest point: date = {latest_u_date.strftime('%Y-%m-%d')}, value = {latest_u_value}, previous value = {previous_u_value}")
 else:
     print("Failed to fetch UNEMPLOY data or data is empty.")
 
+# Fetch vacancy level
 v = fred.get_series("JTSJOL")
 if v is not None and not v.empty:
-    print(f"JTSJOL data fetched successfully. Shape: {v.shape}, Last date: {v.index.max()}")
+    print(f"JTSJOL data fetched successfully: shape = {v.shape}")
     v.index = pd.to_datetime(v.index)
     latest_v_date = v.index.max()
     latest_v_value = v.iloc[-1]
     previous_v_value = v.iloc[-2]
-    print(f"JTSJOL raw latest point: Date='{latest_v_date.strftime('%Y-%m-%d')}', Latest value='{latest_v_value}', Previous value='{previous_v_value}'")
+    print(f"JTSJOL raw latest point: date = {latest_v_date.strftime('%Y-%m-%d')}, value = {latest_v_value}, previous value = {previous_v_value}")
 else:
     print("Failed to fetch JTSJOL data or data is empty.")
 
+# Fetch labor force level
 lf = fred.get_series("CLF16OV")
 if lf is not None and not lf.empty:
-    print(f"CLF16OV data fetched successfully. Shape: {lf.shape}, Last date: {lf.index.max()}")
+    print(f"CLF16OV data fetched successfully: shape = {lf.shape}")
     lf.index = pd.to_datetime(lf.index)
     latest_lf_date = lf.index.max()
     latest_lf_value = lf.iloc[-1]
-    print(f"CLF16OV raw latest point: Date='{latest_lf_date.strftime('%Y-%m-%d')}', Value='{latest_lf_value}'")
+    previous_lf_value = lf.iloc[-2]
+    print(f"CLF16OV raw latest point: date = {latest_lf_date.strftime('%Y-%m-%d')}, value = {latest_lf_value}, previous value = {previous_lf_value}")
 
 else:
     print("Failed to fetch CLF16OV data or data is empty.")
@@ -253,7 +258,7 @@ def make_plot(df, y_column, title, filename, y_label, x_min=None, x_max=None, y_
     latest_processed_u_date = df.index.max()
     latest_processed_u_value = df['data'].iloc[-1]
     previous_processed_u_value = df['data'].iloc[-2]
-    print(f"Unemployment rate latest processed point for CSV: Date='{latest_processed_u_date.strftime('%Y-%m-%d')}', Value='{latest_processed_u_value:.4f}', Other value='{previous_processed_u_value:.4f}'")
+    print(f"Unemployment rate latest processed point for CSV: date = {latest_processed_u_date.strftime('%Y-%m-%d')}, value = {latest_processed_u_value:.4f}, previous value = {previous_processed_u_value:.4f}")
 else:
     print("'df' is EMPTY before writing CSV.")
 
@@ -286,7 +291,7 @@ def make_plot(df, y_column, title, filename, y_label, x_min=None, x_max=None, y_
     latest_processed_v_date = df.index.max()
     latest_processed_v_value = df['data'].iloc[-1]
     previous_processed_v_value = df['data'].iloc[-2]
-    print(f"Vacancy rate latest processed point for CSV: Date='{latest_processed_v_date.strftime('%Y-%m-%d')}', Value='{latest_processed_v_value:.4f}', Other value='{previous_processed_v_value:.4f}'")
+    print(f"Vacancy rate latest processed point for CSV: date = {latest_processed_v_date.strftime('%Y-%m-%d')}, value = {latest_processed_v_value:.4f}, previous value = {previous_processed_v_value:.4f}")
 else:
     print("'df' is EMPTY before writing CSV.")
 
@@ -314,12 +319,8 @@ def make_plot(df, y_column, title, filename, y_label, x_min=None, x_max=None, y_
 # Immediately after writing, read back and print tail in Python
 try:
     df_read_back = pd.read_csv(csv_path_absolute, index_col="Date", parse_dates=True)
-    print("Last 5 rows of vacancy_rate.csv as read by Python immediately after write:")
-    print(df_read_back.tail(5).to_string())
-    latest_read_back_v_date = df_read_back.index.max()
-    latest_read_back_v_value = df_read_back["Vacancy rate (%)"].iloc[-1]
-    previous_read_back_v_value = df_read_back["Vacancy rate (%)"].iloc[-2]
-    print(f"Vacancy rate latest readback point: Date='{latest_read_back_v_date.strftime('%Y-%m-%d')}', Value='{latest_read_back_v_value:.4f}', Other value='{previous_read_back_v_value:.4f}'")
+    print("Last 3 rows of vacancy_rate.csv as read by Python immediately after write:")
+    print(df_read_back.tail(3).to_string())
 
 except Exception as e:
     print(f"Error reading back {csv_path_absolute} in Python: {e}")
@@ -441,6 +442,7 @@ def make_plot(df, y_column, title, filename, y_label, x_min=None, x_max=None, y_
     beveridge_curve_html_path,
     include_plotlyjs='cdn',
     full_html=False,
+    div_id="beveridge_curve",
     config={
         "displaylogo": False,
         "modeBarButtonsToRemove": [

content/papers/16.md

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 title: "Has the Recession Started?"
 date: 2025-05-09
 url: /16/
-tags: ["Beveridge curve", "business cycles", "job vacancies", "Michez rule", "nowcasting", "recession indicator", "recession probability", "recession threshold", "Sahm rule", "unemployment"]
+tags: ["Beveridge curve", "business cycles", "job vacancies", "Michez rule", "nowcasting", "recession indicator", "recession probability", "recession threshold", "Sahm rule"]
 author: ["Pascal Michaillat","Emmanuel Saez"]
 description: "This paper builds the Michez rule, a fast and robust method to detect US recessions from data on unemployment and job vacancies. Published in OBES, 2025."
 summary: "This paper develops the Michez rule, a fast and robust method to detect US recessions using data on unemployment and job vacancies. In December 2024, the rule gives a recession probability of 27%; this recession risk was first detected in March 2024."

content/papers/17.md

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 title: "Early and Accurate Recession Detection Using Classifiers on the Anticipation-Precision Frontier"
 date: 2025-06-11
 url: /17/
-tags: ["backtesting", "Beveridge curve", "business cycles", "classifier ensemble", "job vacancies", "nowcasting", "perfect classifiers", "recession probability", "unemployment"]
+tags: ["backtesting", "Beveridge curve", "business cycles", "classifier ensemble", "job vacancies", "nowcasting", "perfect classifiers", "recession probability"]
 author: ["Pascal Michaillat"]
 description: "This paper detects US recessions in real time from unemployment and vacancy data by combining perfect classifiers on the anticipation-precision frontier."
 summary: "This paper develops a method for detecting US recessions in real time using unemployment and vacancy data. The method combines perfect recession classifiers on the anticipation-precision frontier. Backtesting confirms that recessions are detected early and accurately."

static/dashboard/beveridge_curve.html: 1 addition & 1 deletion (large diff not rendered by default)

static/dashboard/feru.html: 1 addition & 1 deletion (large diff not rendered by default)

static/dashboard/labor_market_tightness.html: 1 addition & 1 deletion (large diff not rendered by default)

static/dashboard/recession_indicator.html: 1 addition & 1 deletion (large diff not rendered by default)

static/dashboard/recession_probability.html: 1 addition & 1 deletion (large diff not rendered by default)

static/dashboard/unemployment_gap.html: 1 addition & 1 deletion (large diff not rendered by default)

static/dashboard/unemployment_rate.html: 1 addition & 1 deletion (large diff not rendered by default)
