FRED stands for Federal Reserve Economic Data, a database of economic time series aggregated from many sources and a great place to find financial data. You can visit the FRED website to search for a data series, or use the Python fredapi package to download data programmatically.
FRED API
- Welcome to FRED, your trusted source for economic data since 1991.
- Download, graph, and track 819,000 US and international time series from 110 sources.
- You can view and toggle the available data series here.
- You need to register your user account and request your API key here.
Importing Economic Series
Let's set the working directory to YOURPATH and check it:
import os
os.chdir('YOURPATH')
os.getcwd()
Import the libraries
import pandas as pd
import numpy as np
from fredapi import Fred
import matplotlib.pyplot as plt
import seaborn as sns
and define the API key
fred = Fred(api_key='your_api_key')
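If you prefer not to hard-code the key in the notebook, one option is to read it from an environment variable. A minimal sketch, assuming you have exported a (hypothetical) variable named FRED_API_KEY beforehand:
import os
from fredapi import Fred

# assumes the key was exported in your shell first, e.g. `export FRED_API_KEY=...`
fred = Fred(api_key=os.environ["FRED_API_KEY"])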
Let’s import the series of interest
start = '2022-01-01'
end = '2023-04-19'
monthly_yield_curve = pd.DataFrame(fred.get_series(
    'T10Y2Y',
    observation_start=start,
    observation_end=end)).resample("M").mean()
fed_funds_rate = pd.DataFrame(fred.get_series(
    'FEDFUNDS',
    observation_start=start,
    observation_end=end)).resample("M").mean()
unemployment_rate = pd.DataFrame(fred.get_series(
    'UNRATE',
    observation_start=start,
    observation_end=end)).resample("M").mean()
sp500 = pd.DataFrame(fred.get_series(
    'SP500',
    observation_start=start,
    observation_end=end)).resample("M").mean()
Combining the data frames into one dataframe
df = pd.concat([monthly_yield_curve, fed_funds_rate, unemployment_rate, sp500], axis=1)
df.columns = ['Yield_Curve', 'Fed_Funds', 'Unemp_Rate', 'sp500']
Let’s plot the Correlation Heatmap
plt.figure(figsize=(15, 8))
# mask the upper triangle so each correlation is shown only once
mask = np.triu(np.ones_like(df.corr(), dtype=bool))
corr_map = sns.heatmap(df.corr(), vmin=-1, vmax=1, mask=mask, cmap="Blues", annot_kws={"size": 12}, annot=True)

Let's create the following multiple plots
plt.style.use('fivethirtyeight')
fig, axs = plt.subplots(4, figsize=(20, 20))
fig.suptitle('Selected FRED Trends', fontsize=32)
axs[0].plot(df.index, df.Yield_Curve)
axs[0].set_title('[1] Yield Curve', fontsize=24)
axs[1].plot(df.index, df.Fed_Funds)
axs[1].set_title('[2] Federal Funds Rate', fontsize=24)
axs[2].plot(df.index, df.Unemp_Rate)
axs[2].set_title('[3] Unemployment Rate', fontsize=24)
axs[3].plot(df.index, df.sp500)
axs[3].set_title('[4] SP 500', fontsize=24)
plt.subplots_adjust(hspace=0.5)
plt.show()


SPY Trading Signals & Returns
Let’s look at the SPY trading signals using yfinance
import yfinance as yf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
TICKER = yf.Ticker("SPY")
TICKER_DF = TICKER.history(start="2022-01-01", end="2023-04-19")
# copy the slice so we can add columns without a SettingWithCopyWarning
TICKER_DF_SUB = TICKER_DF[['Open', 'Close']].copy()
TICKER_DF_SUB.tail()

Let’s compute trading signals based upon the following rolling means
TICKER_DF_SUB["Slow"] = TICKER_DF_SUB['Close'].rolling(window=200).mean()
TICKER_DF_SUB["Fast"] = TICKER_DF_SUB['Close'].rolling(window=50).mean()
TICKER_DF_SUB = TICKER_DF_SUB.dropna()
TICKER_DF_SUB.tail()

SIGNAL = []
Fast = TICKER_DF_SUB['Fast']
Slow = TICKER_DF_SUB['Slow']
for i in range(len(TICKER_DF_SUB)):
    if i < 2:
        # not enough history to compare the previous two days
        SIGNAL.append("NO_SIGNAL")
    else:
        prev_fast = Fast.iloc[i-2]
        prev_slow = Slow.iloc[i-2]
        cur_fast = Fast.iloc[i-1]
        cur_slow = Slow.iloc[i-1]
        if (prev_fast < prev_slow) & (cur_fast > cur_slow):
            SIGNAL.append("BUY")   # fast MA crossed above slow MA
        elif (prev_fast > prev_slow) & (cur_fast < cur_slow):
            SIGNAL.append("SELL")  # fast MA crossed below slow MA
        else:
            SIGNAL.append("NO_SIGNAL")
TICKER_DF_SUB['SIGNAL'] = SIGNAL
buy_indexes = list(TICKER_DF_SUB[TICKER_DF_SUB['SIGNAL'] == 'BUY'].index)
sell_indexes = list(TICKER_DF_SUB[TICKER_DF_SUB['SIGNAL'] == 'SELL'].index)
# if the first signal is a SELL, drop it so every SELL follows a BUY
if len(sell_indexes) > len(buy_indexes):
    sell_indexes.pop(0)
BUY_PRICES = [TICKER_DF_SUB.loc[idx, 'Open'] for idx in buy_indexes]
SELL_PRICES = [TICKER_DF_SUB.loc[idx, 'Open'] for idx in sell_indexes]
# if the last position is still open, close it at the final available Open price
if len(BUY_PRICES) > len(SELL_PRICES):
    SELL_PRICES.append(TICKER_DF_SUB.iloc[-1].Open)
    sell_indexes.append(TICKER_DF_SUB.index[-1])
returns = list(np.array(SELL_PRICES) / np.array(BUY_PRICES) - 1)
BACKTEST_DF = pd.DataFrame({
    "BUY_DATES": buy_indexes,
    "SELL_DATES": sell_indexes,
    "BUY_PRICES": BUY_PRICES,
    "SELL_PRICES": SELL_PRICES,
    "RETURN": returns
})
r = 1
for i in BACKTEST_DF.RETURN:
    r = r * (1 + i)
total_return = r - 1
print("The total return is: " + '{:.1%}'.format(total_return))
The total return is: -9.0%
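Equivalently, the compounded return can be computed in one line from the RETURN column of BACKTEST_DF:
# compound all trade returns and subtract 1
total_return = (1 + BACKTEST_DF['RETURN']).prod() - 1
print("The total return is: " + '{:.1%}'.format(total_return))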
Let’s add a signal column to our data given a slow and fast moving average
def add_sma(df, slow, fast):
    # add slow and fast simple moving averages of the Close price
    df["Slow"] = df['Close'].rolling(window=slow).mean()
    df["Fast"] = df['Close'].rolling(window=fast).mean()
    Slow = df['Slow']
    Fast = df['Fast']
    SIGNAL = []
    for i in range(len(df)):
        if i < 2:
            SIGNAL.append("NO_SIGNAL")
        else:
            prev_fast = Fast.iloc[i-2]
            prev_slow = Slow.iloc[i-2]
            cur_fast = Fast.iloc[i-1]
            cur_slow = Slow.iloc[i-1]
            if (prev_fast < prev_slow) & (cur_fast > cur_slow):
                SIGNAL.append("BUY")
            elif (prev_fast > prev_slow) & (cur_fast < cur_slow):
                SIGNAL.append("SELL")
            else:
                SIGNAL.append("NO_SIGNAL")
    df['SIGNAL'] = SIGNAL
Let’s run a backtest on our data (signal column is required)
def backtest(df):
    buy_indexes = list(df[df['SIGNAL'] == 'BUY'].index)
    sell_indexes = list(df[df['SIGNAL'] == 'SELL'].index)
    # if the first signal is a SELL, drop it so every SELL follows a BUY
    if len(sell_indexes) > len(buy_indexes):
        sell_indexes.pop(0)
    BUY_PRICES = [df.loc[idx, 'Open'] for idx in buy_indexes]
    SELL_PRICES = [df.loc[idx, 'Open'] for idx in sell_indexes]
    # close any still-open position at the last available Open price
    if len(BUY_PRICES) > len(SELL_PRICES):
        SELL_PRICES.append(df.iloc[-1].Open)
        sell_indexes.append(df.index[-1])
    returns = list(np.array(SELL_PRICES) / np.array(BUY_PRICES) - 1)
    BACKTEST_DF = pd.DataFrame({
        "BUY_DATES": buy_indexes,
        "SELL_DATES": sell_indexes,
        "BUY_PRICES": BUY_PRICES,
        "SELL_PRICES": SELL_PRICES,
        "RETURN": returns
    })
    return BACKTEST_DF
Let’s calculate our total return from our backtest data frame
def CUMU_RETURNS(df):
    # compound the individual trade returns into a total return
    r = 1
    for i in df.RETURN:
        r = r * (1 + i)
    return r - 1
def MA_OPTIMIZER(df, fast, slow):
    FAST = []
    SLOW = []
    RETURN = []
    for f in fast:
        for s in slow:
            FAST.append(f)
            SLOW.append(s)
            # work on a copy so the original dataframe is not mutated
            BACKTEST_DF = df.copy()
            add_sma(BACKTEST_DF, s, f)
            RETURN.append(CUMU_RETURNS(backtest(BACKTEST_DF)))
    results = pd.DataFrame({
        "FAST": FAST,
        "SLOW": SLOW,
        "RETURNS": RETURN
    })
    return results
TICKER = yf.Ticker("SPY")
TICKER_DF = TICKER.history(start="2022-01-01", end="2023-04-19")
TICKER_DF_SUB = TICKER_DF[['Open', 'Close']].copy()
results = MA_OPTIMIZER(TICKER_DF_SUB, fast=range(10, 100, 5), slow=range(105, 300, 5))
results.sort_values(by=['RETURNS']).tail(1)

Let’s plot the SPY MA Crossovers and trading signals
TICKER = yf.Ticker("SPY")
TICKER_DF = TICKER.history(start="2022-01-01", end="2023-04-19")
TICKER_DF_SUB = TICKER_DF[['Open', 'Close']].copy()
add_sma(
    TICKER_DF_SUB,
    slow=results.sort_values(['RETURNS']).tail(1)['SLOW'].values[0],
    fast=results.sort_values(['RETURNS']).tail(1)['FAST'].values[0])
BACKTEST_DF = backtest(TICKER_DF_SUB)
TICKER_DF_SUB_NONA = TICKER_DF_SUB.dropna()
plt.style.use('bmh')
plt.figure(figsize=(20, 10))
TICKER_DF_SUB_NONA["Close"].plot(label="Close")
TICKER_DF_SUB_NONA["Fast"].plot(label="Fast")
TICKER_DF_SUB_NONA["Slow"].plot(label="Slow")
plt.plot(
    TICKER_DF_SUB_NONA[TICKER_DF_SUB_NONA['SIGNAL'] == "BUY"].index,
    TICKER_DF_SUB_NONA[TICKER_DF_SUB_NONA['SIGNAL'] == "BUY"]['Slow'].values,
    '^',
    markersize=10,
    color='g',
    label='buy'
)
plt.plot(
    TICKER_DF_SUB_NONA[TICKER_DF_SUB_NONA['SIGNAL'] == "SELL"].index,
    TICKER_DF_SUB_NONA[TICKER_DF_SUB_NONA['SIGNAL'] == "SELL"]['Slow'].values,
    'v',
    markersize=10,
    color='r',
    label='sell'
)
plt.ylabel('Price', fontsize=15)
plt.xlabel('Date', fontsize=15)
plt.title('SPY MA Crossover', fontsize=20)
plt.legend()
plt.grid(color='k', linestyle='--', linewidth=1)
plt.show()

Currency Exchange Rate Prediction Model
This notebook uses Python-based ML libraries to build a model that predicts the JPY to USD (and USD to EUR) exchange rate for a given day.
Let’s import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
# Import linear regression model
from sklearn.linear_model import LinearRegression
# Import model evaluation tools
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_log_error
# Generate data
import pandas_datareader.data as pdr
start_date = dt.datetime(2022,1,2)
end_date = dt.datetime(2023,4,19)
df = pdr.DataReader(["DEXJPUS", "DEXUSEU"], data_source="fred", start=start_date, end=end_date)
# fill missing values with the data from the previous day
df = df.ffill()
df.isna().sum()
DEXJPUS 0
DEXUSEU 0
dtype: int64
# create figure
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8))
fig.tight_layout(pad=4)
# plot JPY/USD exchange rate
ax1.plot(df["DEXJPUS"], color="indianred")
ax1.set(title="JPY to USD exchange rate",
        xlabel="Date",
        ylabel="Rate")
# plot USD/EUR exchange rate
ax2.plot(df["DEXUSEU"], color="royalblue")
ax2.set(title="USD to EUR exchange rate",
        xlabel="Date",
        ylabel="Rate");

Let's display a histogram of the day-over-day change in each exchange rate, i.e. the difference between each day and the previous day, divided by the previous day's value. To reference the previous date we can use the pandas time-series method DataFrame.shift(), which shifts the index (dates) by the desired number of periods.
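As a quick toy illustration of what shift() does (the values below are made up purely for demonstration):
# shift(1) moves each value down one row, so row t holds the value from row t-1
s = pd.Series([100.0, 101.0, 103.0], index=pd.date_range("2023-01-02", periods=3))
print(s.shift(1))                       # the first row becomes NaN
print((s - s.shift(1)) / s.shift(1))    # day-over-day percentage change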
# JPY/USD histogram
JPY_USD = (df["DEXJPUS"] - df["DEXJPUS"].shift(1)) / df["DEXJPUS"].shift(1)
JPY_USD.hist(bins=30, color="indianred");

# Plot a USD/EUR histogram
USD_EUR = (df["DEXUSEU"] - df["DEXUSEU"].shift(1)) / df["DEXUSEU"].shift(1)
USD_EUR.hist(bins=30, color="royalblue");

Build a linear regression model to predict future prices.
# Drop the DEXUSEU column and model the JPY/USD rate only
JPY_USD_df = df.drop("DEXUSEU", axis=1)
# create a dataframe that holds the rate for "today" plus up to 3 days before
three_days_df = pd.concat([
    JPY_USD_df,
    JPY_USD_df.shift(1),
    JPY_USD_df.shift(2),
    JPY_USD_df.shift(3)
], axis=1)
three_days_df
# name the columns and drop rows with NaN values
three_days_df.columns = ["today", "yesterday", "2 days ago", "3 days ago"]
three_days_df.dropna(inplace=True)
Now we are ready to split the data into train and test sets.
# train on January 2023 and test on mid-March to mid-April 2023
train_window = three_days_df["2023-1-1":"2023-1-30"]
test_window = three_days_df["2023-3-19":"2023-4-19"]
# split data into train and test
X_train = train_window.drop("today", axis=1)
y_train = train_window["today"]
X_test = test_window.drop("today", axis=1)
y_test = test_window["today"]
# instantiate model
model = LinearRegression()
# fit the model
fit_model = model.fit(X_train, y_train)
# Model score (R^2 on the held-out window)
fit_model.score(X_test, y_test)
# import train_test_split
from sklearn.model_selection import train_test_split
# split all data into X and y
X = three_days_df.drop("today", axis=1)
y = three_days_df["today"]
# split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=11)
# instantiate model
model = LinearRegression()
# fit model
fitted_model = model.fit(X_train, y_train)
# score model
fitted_model.score(X_test, y_test)
0.9850981595267364
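We can also inspect the test-set error directly with the metrics imported earlier (mean_absolute_error and r2_score); a short sketch using the fitted model:
# evaluate predictions on the held-out test set
y_pred = fitted_model.predict(X_test)
print("MAE:", mean_absolute_error(y_test, y_pred))
print("R^2:", r2_score(y_test, y_pred))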
Personal Income
Let's look at the Per Capita Personal Income
personal_income_series = fred.search_by_release(175, limit=5, order_by='popularity', sort_order='desc')
personal_income_series['title']
series id
PCPI06037      Per Capita Personal Income in Los Angeles County, CA
PCPI06075      Per Capita Personal Income in San Francisco County/city, CA
SEAT653PCPI    Per Capita Personal Income in Seattle-Tacoma-Bellevue, W...
DALL148PCPI    Per Capita Personal Income in Dallas-Fort Worth-Arlingto...
PHOE004PCPI    Per Capita Personal Income in Phoenix-Mesa-Scottsdale, A...
Name: title, dtype: object
df = {}
df['SF'] = fred.get_series('PCPI06075')
df['NY'] = fred.get_series('PCPI36061')
df['DC'] = fred.get_series('PCPI11001')
df = pd.DataFrame(df)
df.plot()

Summary
- The FRED API gets you the economic data you need, anytime and anywhere.
- FRED is a database of more than 819,000 US and international economic time series from 110 sources.
- FRED offers a wealth of economic data and information to promote economic education and enhance economic research.
- FRED provides historical U.S. economic and financial data, including daily U.S. interest rates, monetary and business indicators, exchange rates, and regional economic data.
- We examined correlations among the yield curve, the Federal Funds rate, the unemployment rate, and the S&P 500, and compared them with SPY trading signals and returns based on MA crossovers.
- We trained a LinearRegression() model on the DEXJPUS (JPY/USD) series, reaching an R² score of about 0.985 on the test set; the same approach applies to DEXUSEU.
- We looked at per-capita personal income for NY, SF, and DC.
- Additional economic plots of interest are discussed in the Appendix.
Explore More
- Revision 360 of Risk Aware Investing after SVB Collapse – 1. The Financial Sector
- USDTUSD | Tether USD Analysis 6 Nov ’22
- DJI Market State Analysis using the Cruz Fitting Algorithm
- Bear vs. Bull Portfolio Risk/Return Optimization QC Analysis
- Zacks Insights into the Commodity Bull Market
- Bear Market Similarity Analysis using Nasdaq 100 Index Data
- Inflation-Resistant Stocks to Buy
- A Weekday Market Research Update
Appendix: More FRED Examples
Let's look at the following additional FRED charts. These examples use the fredpy package, which also needs the FRED API key:
import fredpy as fp
# set your FRED API key for fredpy (see the fredpy documentation)
fp.api_key = 'your_api_key'
Unemployment Rate:
fig = plt.figure(figsize=(14,6))
u = fp.series('UNRATE')
plt.plot(u.data.index, u.data.values, '-', lw=3, alpha=0.65)
plt.grid()
plt.ylabel('Percent');

Real Gross Domestic Product:
gdp = fp.series('gdpc1')
print(type(gdp))
<class 'fredpy.series'>
print(gdp.title)
print(gdp.units)
print(gdp.frequency)
print(gdp.date_range)
print(gdp.source)
Real Gross Domestic Product
Billions of Chained 2012 Dollars
Quarterly
Range: 1947-01-01 to 2022-10-01
U.S. Bureau of Economic Analysis
fig = plt.figure(figsize=(14,6))
ax = fig.add_subplot(1,1,1)
ax.plot(gdp.data, '-', lw=3, alpha=0.65)
ax.grid()
ax.set_title(gdp.title)
ax.set_ylabel(gdp.units);

Real Gross Domestic Product in 2022:
win = ['01-01-2022', '31-12-2022']
gdp_win = gdp.window(win)
fig = plt.figure(figsize=(14,6))
ax = fig.add_subplot(1,1,1)
ax.plot(gdp_win.data, '-', lw=3, alpha=0.65)
ax.grid()
ax.set_title(gdp_win.title)
ax.set_ylabel(gdp_win.units)
gdp_win.recessions()

Percentage Change in Real Gross Domestic Product:
gdp_pc = gdp.pc(annualized=True)
fig = plt.figure(figsize=(14,6))
ax = fig.add_subplot(1,1,1)
ax.plot(gdp_pc.data, '-', lw=3, alpha=0.65)
ax.grid()
ax.set_title(gdp_pc.title)
ax.set_ylabel(gdp_pc.units)
gdp_pc.recessions()

Log Real Gross Domestic Product:
gdp_log = gdp.log()
fig = plt.figure(figsize=(14,6))
ax = fig.add_subplot(1,1,1)
ax.plot(gdp_log.data, '-', lw=3, alpha=0.65)
ax.set_title(gdp_log.title)
ax.set_ylabel(gdp_log.units)
ax.grid()

Log Real GDP per Capita
fig = plt.figure(figsize=(16,4))
ax1 = fig.add_subplot(1,1,1)
ax1.plot(gdp.data, '-', lw=3, alpha=0.65)
ax1.grid()
ax1.set_title('log real GDP per capita')
gdp.recessions()

Download CPI and GDP deflator data
cpi = fp.series('CPIAUCSL')
deflator = fp.series('GDPDEF')
fig = plt.figure(figsize=(16,4))
ax = fig.add_subplot(1,2,1)
ax.plot(cpi.data, '-', lw=3, alpha=0.65)
ax.grid()
ax.set_title(cpi.title.split(':')[0])
ax.set_ylabel(cpi.units)
ax = fig.add_subplot(1,2,2)
ax.plot(deflator.data, '-m', lw=3, alpha=0.65)
ax.grid()
ax.set_title(deflator.title)
ax.set_ylabel(deflator.units)
fig.tight_layout()

US inflation
# compute annualized percentage changes of the CPI and the GDP deflator
cpi_pi = cpi.pc(annualized=True)
def_pi = deflator.pc(annualized=True)
fig = plt.figure(figsize=(16,4))
ax = fig.add_subplot(1,1,1)
ax.plot(cpi_pi.data, '-', lw=3, alpha=0.65, label='cpi')
ax.plot(def_pi.data, '-', lw=3, alpha=0.65, label='def')
ax.legend(loc='upper right')
ax.set_title('US inflation')
ax.set_ylabel('Percent')
ax.grid()

Let’s compute the HP-filter
gdp_cycle, gdp_trend = gdp.hp_filter()
fig = plt.figure(figsize=(16,8))
ax1 = fig.add_subplot(2,1,1)
ax1.plot(gdp.data, '-', lw=3, alpha=0.7, label='actual')
ax1.plot(gdp_trend.data, 'r-', lw=3, alpha=0.65, label='HP trend')
ax1.grid()
ax1.set_title('log real GDP per capita')
gdp.recessions()
ax1.legend(loc='lower right')
fig.tight_layout()
ax1 = fig.add_subplot(2,1,2)
ax1.plot(gdp_cycle.data, 'b-', lw=3, alpha=0.65, label='HP cycle')
ax1.grid()
ax1.set_title('log real GDP per capita - dev from trend')
gdp.recessions()
ax1.legend(loc='lower right')
fig.tight_layout()

Unemployment vs Inflation
u = fp.series('LNS14000028')
p = fp.series('CPIAUCSL')
p = p.pc(annualized=True)
p = p.ma(length=6, center=True)
p, u = fp.window_equalize([p, u])
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(2,1,1)
ax.plot(u.data, 'b-', lw=2)
ax.grid(True)
ax.set_title('Unemployment')
ax.set_ylabel('Percent')
ax = fig.add_subplot(2,1,2)
ax.plot(p.data, 'r-', lw=2)
ax.grid(True)
ax.set_title('Inflation')
ax.set_ylabel('Percent')
fig.autofmt_xdate()

Inflation and unemployment: BP-filtered data
p_bpcycle, p_bptrend = p.bp_filter(low=24, high=84, K=84)
u_bpcycle, u_bptrend = u.bp_filter(low=24, high=84, K=84)
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(1,1,1)
t = np.arange(len(u_bpcycle.data))
plt.scatter(u_bpcycle.data, p_bpcycle.data, facecolors='none', alpha=0.75, s=20, c=t, linewidths=1.5)
ax.set_xlabel('Unemployment rate (%)')
ax.set_ylabel('Inflation rate (%)')
ax.set_title('Inflation and unemployment: BP-filtered data')
ax.grid(True)
cbar = plt.colorbar(ax=ax)
cbar.set_ticks([int(i) for i in cbar.get_ticks()[:-1]])
cbar.set_ticklabels([p_bpcycle.data.index[int(i)].strftime('%b %Y') for i in cbar.get_ticks()[:]])

Inflation and unemployment: HP-filtered data
p_hpcycle, p_hptrend = p.hp_filter(lamb=129600)
u_hpcycle, u_hptrend = u.hp_filter(lamb=129600)
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(1,1,1)
t = np.arange(len(u_hpcycle.data))
plt.scatter(u_hpcycle.data, p_hpcycle.data, facecolors='none', alpha=0.75, s=20, c=t, linewidths=1.5)
ax.set_xlabel('Unemployment rate (%)')
ax.set_ylabel('Inflation rate (%)')
ax.set_title('Inflation and unemployment: HP-filtered data')
ax.grid(True)
cbar = plt.colorbar(ax=ax)
cbar.set_ticks([int(i) for i in cbar.get_ticks()[:-1]])
cbar.set_ticklabels([p_hpcycle.data.index[int(i)].strftime('%b %Y') for i in cbar.get_ticks()[:]])

US GDP vintages: 1991 vs. 1992
Get all available vintages
gdp_vintage_dates = fp.get_vintage_dates('GDPA')
print('Number of vintages available:', len(gdp_vintage_dates))
print('Oldest vintage: ', gdp_vintage_dates[0])
print('Second vintage: ', gdp_vintage_dates[1])
Number of vintages available: 344
Oldest vintage: 1991-12-04
Second vintage: 1992-01-29
gdp_old = fp.series('GDPA', observation_date=gdp_vintage_dates[0])
gdp_cur = fp.series('GDPA', observation_date=gdp_vintage_dates[1])
gdp_old, gdp_cur = fp.window_equalize([gdp_old, gdp_cur])
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(gdp_old.data, lw=3, alpha=0.65, label=pd.to_datetime(gdp_vintage_dates)[0].strftime('Vintage: %b %Y'))
ax.plot(gdp_cur.data, lw=3, alpha=0.65, label=pd.to_datetime(gdp_vintage_dates)[1].strftime('Vintage: %b %Y'))
ax.set_ylabel(gdp_cur.units)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_title('US GDP')
ax.grid()
