I'm trying to reverse engineer which stocks are in an ETF using Python.
In my code, I create a fake ETF that equal-weights 20 random stocks.
I then try to reverse engineer what's in the ETF using price data for a universe of 200+ stocks.
No matter what approach I use, or how much (or little) price data I provide, the results are much poorer than I expected, whichever technique I try.
The results only get worse if I try a long/short ETF (and remove the positive-only constraint); a sketch of that variant is at the end of the post.
Does anyone have any tips on what I could try next to get better results?
Sample code:
import yfinance as yf
import pandas as pd
import numpy as np
from sklearn.linear_model import Lasso
import plotly.graph_objects as go
import random
import time
from sklearn.metrics import r2_score
# Download historical data for all stocks/index
start_date = "2020-03-01"
end_date = "2025-03-01"
rics = ['^AXJO',
'360.AX','A2M.AX', 'A4N.AX', 'AAI.AX', 'ABB.AX', 'ABG.AX', 'ACL.AX', 'AD8.AX', 'ADH.AX', 'ADT.AX', 'AEF.AX', 'AEL.AX', 'AFG.AX', 'AGL.AX', 'AIA.AX', 'ALD.AX', 'ALL.AX', 'ALQ.AX', 'ALX.AX', 'AMC.AX', 'AMP.AX', 'ANN.AX',
'ANZ.AX', 'AOV.AX', 'APA.AX', 'APE.AX', 'ARB.AX', 'ARF.AX', 'ARU.AX', 'ASB.AX', 'ASK.AX', 'ASX.AX', 'AUB.AX','AX1.AX', 'AZJ.AX', 'BAP.AX', 'BEN.AX', 'BGA.AX', 'BGL.AX', 'BHP.AX', 'BKW.AX', 'BMN.AX', 'BOE.AX', 'BOQ.AX',
'BPT.AX', 'BRG.AX', 'BRN.AX', 'BSL.AX', 'BWP.AX', 'BXB.AX', 'CAR.AX', 'CBA.AX', 'CCP.AX', 'CDA.AX', 'CGF.AX', 'CHC.AX', 'CHN.AX', 'CIA.AX', 'CIP.AX', 'CKF.AX', 'CLW.AX', 'CMM.AX', 'CMW.AX', 'CNI.AX', 'CNU.AX', 'COF.AX',
'COH.AX', 'COL.AX', 'CPU.AX', 'CQE.AX', 'CQR.AX', 'CRN.AX', 'CSC.AX', 'CSL.AX', 'CTD.AX', 'CTT.AX', 'CU6.AX', 'CUV.AX', 'CWY.AX', 'DBI.AX', 'DDR.AX', 'DEG.AX', 'DHG.AX', 'DMP.AX', 'DOW.AX', 'DRO.AX', 'DRR.AX', 'DTL.AX',
'DVP.AX', 'DXI.AX', 'DXS.AX', 'DYL.AX', 'EBO.AX', 'EDV.AX', 'ELD.AX', 'EMR.AX', 'EVN.AX', 'EVT.AX', 'FLT.AX', 'FMG.AX', 'FPH.AX', 'FPR.AX', 'GEM.AX', 'GMD.AX', 'GMG.AX', 'GNC.AX', 'GOR.AX', 'GOZ.AX', 'GPT.AX',
'GTK.AX', 'GWA.AX', 'GYG.AX', 'HCW.AX', 'HDN.AX', 'HLI.AX', 'HLS.AX', 'HMC.AX', 'HPI.AX', 'HSN.AX', 'HUB.AX', 'HVN.AX', 'IAG.AX', 'IDX.AX', 'IEL.AX', 'IFL.AX', 'IFM.AX', 'IFT.AX', 'IGO.AX', 'ILU.AX', 'IMD.AX', 'IMM.AX',
'IMU.AX', 'INA.AX', 'ING.AX', 'INR.AX', 'IPH.AX', 'IPL.AX', 'IPX.AX', 'IRE.AX', 'JBH.AX', 'JDO.AX', 'JHX.AX', 'JIN.AX', 'JLG.AX', 'KAR.AX', 'KGN.AX', 'KLS.AX', 'LIC.AX', 'LLC.AX', 'LLL.AX', 'LNW.AX', 'LOT.AX', 'LOV.AX',
'LTM.AX', 'LTR.AX', 'LYC.AX', 'MAC.AX', 'MAF.AX', 'MAQ.AX', 'MEI.AX', 'MFG.AX', 'MGH.AX', 'MGR.AX', 'MIN.AX', 'MMS.AX', 'MND.AX', 'MP1.AX', 'MPL.AX', 'MQG.AX', 'MSB.AX', 'MTS.AX', 'MVF.AX', 'MYS.AX', 'MYX.AX', 'NAB.AX',
'NAN.AX', 'NCK.AX', 'NEC.AX', 'NEM.AX', 'NEU.AX', 'NHC.AX', 'NHF.AX', 'NIC.AX', 'NSR.AX', 'NST.AX', 'NUF.AX', 'NVX.AX', 'NWH.AX', 'NWL.AX', 'NWS.AX', 'NXG.AX', 'NXL.AX', 'NXT.AX', 'OFX.AX', 'OML.AX', 'OPT.AX', 'ORA.AX',
'ORG.AX', 'ORI.AX', 'PDI.AX', 'PDN.AX', 'PFP.AX', 'PLS.AX', 'PME.AX', 'PMT.AX', 'PMV.AX', 'PNI.AX', 'PNV.AX', 'PPT.AX', 'PRN.AX', 'PRU.AX', 'PTM.AX', 'PWH.AX', 'PXA.AX', 'QAN.AX', 'QBE.AX', 'QRI.AX', 'QUB.AX', 'RDX.AX',
'REA.AX', 'REG.AX', 'REH.AX', 'RFF.AX', 'RGN.AX', 'RHC.AX', 'RIC.AX', 'RIO.AX', 'RMD.AX', 'RMS.AX', 'RRL.AX', 'RSG.AX', 'RUL.AX', 'RWC.AX', 'S32.AX', 'SCG.AX', 'SDF.AX', 'SDR.AX', 'SEK.AX', 'SFR.AX', 'SGH.AX', 'SGM.AX',
'SGP.AX', 'SGR.AX', 'SHL.AX', 'SHV.AX', 'SIG.AX', 'SIQ.AX', 'SKC.AX', 'SLC.AX', 'SLX.AX', 'SMR.AX', 'SPK.AX', 'SPR.AX', 'SSM.AX', 'STO.AX', 'STX.AX', 'SUL.AX', 'SUN.AX', 'SYA.AX', 'SYR.AX', 'TAH.AX', 'TCL.AX',
'TLC.AX', 'TLS.AX', 'TLX.AX', 'TNE.AX', 'TPG.AX', 'TPW.AX', 'TUA.AX', 'TWE.AX', 'TYR.AX', 'URW.AX', 'VAU.AX', 'VCX.AX', 'VEA.AX', 'VNT.AX', 'VSL.AX', 'VUL.AX', 'WA1.AX', 'WAF.AX', 'WBC.AX', 'WBT.AX', 'WC8.AX', 'WDS.AX',
'WEB.AX', 'WES.AX', 'WGX.AX', 'WJL.AX', 'WOR.AX', 'WOW.AX', 'WPR.AX', 'WTC.AX', 'XRO.AX', 'XYZ.AX', 'YAL.AX', 'ZIP.AX'
]
#get data and remove NaNs
data = yf.download(rics, start=start_date, end=end_date, progress=False)['Close']
print(f"Data shape: {data.shape}")
data = data.dropna(subset=['^AXJO'])
print(f"Data shape after removing NaN rows in AXJO: {data.shape}")
data = data.dropna(axis=1)
print(f"Data shape after removing stocks with NaN: {data.shape}")
stock_data = data.drop(columns=['^AXJO'])
index_data = data[['^AXJO']].copy()
#Check data
print(f"Cleaned stock data shape: {stock_data.shape}")
print(f"Cleaned index data shape: {index_data.shape}")
assert stock_data.shape[0] == index_data.shape[0], "Row count mismatch!"
# Generate a random ETF
num_stocks = 20
etf_price = 100
stocks = random.sample(list(stock_data.columns), num_stocks)
weights = [1/num_stocks] * num_stocks
prices = list(stock_data.iloc[0][stocks])
constituents = pd.DataFrame({
'Stock': stocks,
'Price': prices,
'Weight': weights
})
constituents['Quantity'] = constituents['Weight'] * etf_price / constituents['Price']
constituents = constituents.sort_values(by='Stock', ascending=True).reset_index(drop=True)
display(constituents)
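# ETF price series = sum of (quantity * closing price) across the 20 constituents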
etf = (stock_data[stocks] * constituents.set_index('Stock')['Quantity']).sum(axis=1).to_frame(name='ETF')
fig = go.Figure()
fig.add_trace(go.Scatter(x=etf.index, y=etf["ETF"], mode="lines", name="ETF Price"))
fig.update_layout(title="ETF", xaxis_title="Date", yaxis_title="Price")
fig.show()
# Specify the universe to test against (uncomment to test only the true constituents)
# stock_universe = stocks.copy()
# Test against the whole universe
stock_universe = stock_data.columns
test_data = stock_data[stock_universe].copy()
X = test_data.values
Y = etf.values.flatten()
# Reduce sample size / test against more or less data
#X = X[:100]
#Y = Y[:100]
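# Positive-only Lasso: the fitted coefficients are interpreted as share quantities in the ETF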
lasso = Lasso(alpha=0.00001, max_iter=1000000000, positive=True)
lasso.fit(X, Y)
model_quantities = lasso.coef_
model_stocks = np.where(model_quantities > 0)[0]
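# Convert fitted quantities to weights using day-one prices and the day-one ETF value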
model_weights = (model_quantities * X[0, :]) / Y[0]
model_results = pd.DataFrame({
"Stock": test_data.columns,
"Model Quantity": model_quantities,
"Model Weight": model_weights
})
model_results = model_results.sort_values(by='Stock', ascending=True).reset_index(drop=True)
model_etf = (test_data * model_results.set_index('Stock')['Model Quantity']).sum(axis=1).to_frame(name='Model ETF')
constituent_comparison = pd.merge(constituents,model_results, on='Stock', how='outer')
constituent_comparison.fillna(0, inplace=True)
constituent_comparison['Weight Error'] = round(constituent_comparison['Model Weight'] - constituent_comparison['Weight'],3)
constituent_comparison = constituent_comparison.sort_values(by=['Model Weight', 'Weight'], ascending=[False, True]).reset_index(drop=True)
constituent_comparison_formatted = constituent_comparison.head(15).style.format({'Model Weight': '{:.1%}','Weight Error': '{:.1%}'})
print(f'{len(model_stocks)} stocks in the Model ETF')
display(constituent_comparison_formatted)
returns_comparision = etf.join(model_etf)
returns_comparision['ETF Returns'] = returns_comparision['ETF'].pct_change()
returns_comparision['Model ETF Returns'] = returns_comparision['Model ETF'].pct_change()
returns_comparision['ETF Cum Return'] = (1 + returns_comparision['ETF Returns']).cumprod().fillna(1)
returns_comparision['Model ETF Cum Return'] = (1 + returns_comparision['Model ETF Returns']).cumprod().fillna(1)
returns_comparision['Error'] = returns_comparision['ETF Returns'] - returns_comparision['Model ETF Returns']
fig = go.Figure()
fig.add_trace(go.Scatter(x=returns_comparision.index, y=returns_comparision["ETF"], mode="lines", name="ETF"))
fig.add_trace(go.Scatter(x=returns_comparision.index, y=returns_comparision["Model ETF"], mode="lines", name="Model ETF"))
fig.update_layout(title="ETF vs Model ETF", xaxis_title="Date", yaxis_title="Price")
fig.show()
#Cum Returns
fig2 = go.Figure()
fig2.add_trace(go.Scatter(x=returns_comparision.index, y=returns_comparision["ETF Cum Return"], mode="lines", name="ETF"))
fig2.add_trace(go.Scatter(x=returns_comparision.index, y=returns_comparision["Model ETF Cum Return"], mode="lines", name="Model ETF"))
fig2.update_layout(title="ETF vs Model ETF Cumulative Returns", xaxis_title="Date", yaxis_title="Cumulative Return")
fig2.show()
#Error
fig3 = go.Figure()
fig3.add_trace(go.Bar(x=returns_comparision.index, y=returns_comparision['Error'], name='Return Difference'))
fig3.update_layout(title="ETF Returns less Model ETF Returns", yaxis_tickformat=".3%", xaxis_title="Date", yaxis_title="Error (%)")
fig3.show()
# Calculate statistics
mean_error = returns_comparision['Error'].mean()
std_error = returns_comparision['Error'].std()
max_error = returns_comparision['Error'].max()
min_error = returns_comparision['Error'].min()
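# R-squared of daily returns (skip the first row, which is NaN from pct_change)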
r_squared = r2_score(returns_comparision['ETF Returns'][1:], returns_comparision['Model ETF Returns'][1:])
# Print stats
print("Error Statistics:")
print(f"Mean Error: {mean_error:.2%}")
print(f"Standard Deviation Error: {std_error:.2%}")
print(f"Max Error: {max_error:.2%}")
print(f"Min Error: {min_error:.2%}")
print(f"R-squared: {r_squared:.2%}")