import pandas as pd
import numpy as np
from datetime import datetime
from dateutil.relativedelta import relativedelta
from hmmlearn.hmm import GaussianHMM

###############################################
# 1. Data Loading
###############################################

def load_price_data(filepath):
    """
    Load historical prices from an Excel file.
    Assumes that the first column is dates and the remaining columns are tickers.
    """
    df = pd.read_excel(filepath, index_col=0)
    df.index = pd.to_datetime(df.index)
    return df

def load_macro_data(filepath):
    """
    Load macro indicators from an Excel file.
    Assumes first column is Date, then VIX, Consumer Confidence, IG Spreads.
    """
    df = pd.read_excel(filepath, index_col=0)
    df.index = pd.to_datetime(df.index)
    df.columns = ['VIX', 'ConsumerConfidence', 'IGSpreads']
    # Fill missing values (forward-fill, then back-fill;
    # fillna(method=...) is deprecated in recent pandas)
    df = df.ffill().bfill()
    return df

###############################################
# 2. Helper: Observation Dates
###############################################

def get_observation_dates(start_date, end_date, rebalance_period):
    dates = []
    current = start_date
    while current <= end_date:
        dates.append(current)
        current += relativedelta(months=rebalance_period)
    return dates

###############################################
# 3. Initialize the Portfolio & CASH
###############################################

def initialize_portfolio(prices, date, tickers, initial_aum):
    portfolio = {}
    allocation = initial_aum / len(tickers)
    for ticker in tickers:
        price = prices.loc[date, ticker]
        portfolio[ticker] = allocation / price
    return portfolio

###############################################
# 4. Lookback Metric Computation
###############################################

def compute_lookback_metric(prices, current_date, ticker, lookback_period, metric_type='simple'):
    prices = prices.sort_index()
    lookback_date = current_date - relativedelta(months=lookback_period)

    current_price = prices[ticker].asof(current_date)
    lookback_price = prices[ticker].asof(lookback_date)

    if pd.isna(current_price) or pd.isna(lookback_price):
        raise ValueError(f"Missing price data for {ticker} on {current_date} or {lookback_date}.")

    if metric_type == 'simple':
        metric = (current_price / lookback_price) - 1
    elif metric_type == 'sma':
        window = prices[ticker].loc[lookback_date:current_date]
        if window.empty:
            raise ValueError(f"No price data for {ticker} between {lookback_date} and {current_date}.")
        sma = window.mean()
        metric = (current_price - sma) / sma
    else:
        raise ValueError("Invalid metric type. Choose 'simple' or 'sma'.")
    return metric

###############################################
# 5. Ranking Assets by Momentum
###############################################

def rank_assets(prices, current_date, tickers, lookback_period, metric_type):
    metrics = {}
    for ticker in tickers:
        metric = compute_lookback_metric(prices, current_date, ticker, lookback_period, metric_type)
        metrics[ticker] = metric
    sorted_tickers = sorted(metrics, key=metrics.get, reverse=True)
    ranks = {ticker: rank + 1 for rank, ticker in enumerate(sorted_tickers)}
    return sorted_tickers, ranks, metrics

###############################################
# 6. Compute Current Portfolio Value
###############################################

def compute_portfolio_value(portfolio, prices, current_date):
    value = 0
    for ticker, quantity in portfolio.items():
        price = prices.loc[current_date, ticker]
        value += quantity * price
    return value

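###############################################
# (Hedged example) Smoke test for the ranking helpers
###############################################
# Illustration only, not part of the original strategy: this builds a small
# synthetic monthly price panel (the tickers 'AAA', 'BBB', 'CCC' and the random
# walks below are made up) and runs rank_assets on it, so the helpers above can
# be checked without the Excel inputs.

def demo_rank_assets():
    rng = np.random.default_rng(0)
    dates = pd.date_range('2020-01-01', periods=24, freq='MS')
    demo_tickers = ['AAA', 'BBB', 'CCC']
    demo_prices = pd.DataFrame(
        {t: 100 * np.cumprod(1 + rng.normal(0.005, 0.03, len(dates)))
         for t in demo_tickers},
        index=dates)
    as_of = dates[-1]
    sorted_tickers, ranks, metrics = rank_assets(demo_prices, as_of, demo_tickers,
                                                 lookback_period=6, metric_type='simple')
    print("Ranking:", sorted_tickers)
    print("Metrics:", {t: round(m, 4) for t, m in metrics.items()})
    return sorted_tickers, ranks, metrics
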
###############################################
# 7. Rebalance the Portfolio (Momentum Portion Only)
###############################################

def rebalance_portfolio(portfolio, prices, current_date, tickers, sorted_tickers,
                        internal_rebalance_ratios, rebalance_ratio):
    portfolio_value = compute_portfolio_value(portfolio, prices, current_date)
    rebalance_amount = portfolio_value * rebalance_ratio

    target_trades = {ticker: rebalance_amount * internal_rebalance_ratios[i]
                     for i, ticker in enumerate(sorted_tickers)}

    total_sold = 0
    actual_trades = {}

    for ticker, target_trade in target_trades.items():
        price = prices.loc[current_date, ticker]
        if target_trade < 0:
            available_notional = portfolio[ticker] * price
            sell_target = abs(target_trade)
            actual_sell = min(available_notional, sell_target)
            actual_trades[ticker] = -actual_sell
            total_sold += actual_sell
        else:
            actual_trades[ticker] = 0

    total_buy_target = sum(trade for trade in target_trades.values() if trade > 0)
    if total_buy_target > 0:
        for ticker, target_trade in target_trades.items():
            if target_trade > 0:
                proportion = target_trade / total_buy_target
                buy_amount = total_sold * proportion
                actual_trades[ticker] = buy_amount

    new_portfolio = portfolio.copy()
    for ticker, trade_notional in actual_trades.items():
        price = prices.loc[current_date, ticker]
        qty_change = trade_notional / price
        new_portfolio[ticker] += qty_change

    return new_portfolio, actual_trades, portfolio_value

###############################################
# 8. Macro: Composite Risk & HMM Functions
###############################################

def scale(x, low, high):
    """Safely scale x between 0 and 1 given bounds low and high."""
    if high - low == 0:
        return 0.0
    return np.clip((x - low) / (high - low), 0, 1)

def compute_composite_risk(vix, consumer_confidence, ig_spreads, params):
    """
    Convert each macro indicator into a risk score (0 to 1) using preset bounds.
    For Consumer Confidence, higher values mean lower risk, so we invert the score.
    """
    vix_low, vix_high = params['VIX']
    cc_low, cc_high = params['ConsumerConfidence']
    ig_low, ig_high = params['IGSpreads']

    risk_vix = scale(vix, vix_low, vix_high)
    risk_cc = 1 - scale(consumer_confidence, cc_low, cc_high)
    risk_ig = scale(ig_spreads, ig_low, ig_high)

    composite_risk = (risk_vix + risk_cc + risk_ig) / 3.0
    if np.isnan(composite_risk) or np.isinf(composite_risk):
        composite_risk = 0.5
    return composite_risk

def fit_hmm_on_series(series, n_components=2):
    """
    Fit a Gaussian HMM on a 1D numpy array of composite risk values.
    Returns the fitted model and index of the bear state (state with higher mean).
    """
    X = series.reshape(-1, 1)
    model = GaussianHMM(n_components=n_components, covariance_type="diag",
                        n_iter=1000, random_state=42)
    model.fit(X)
    state_means = model.means_.flatten()
    bear_state = np.argmax(state_means)
    return model, bear_state

def compute_bear_probability(model, bear_state, current_value):
    """
    Compute the probability that the current composite risk value
    is generated by the bear state.
    """
    prob = model.predict_proba(np.array([[current_value]]))[0]
    p_bear = prob[bear_state]
    return p_bear

def regime_allocation_from_hmm(p_bear, max_alloc=1.0, min_alloc=0.3, threshold=0.8):
    """
    Map bear probability to target momentum allocation.
    If p_bear < threshold, return max_alloc. Otherwise, linearly interpolate.
    """
    if p_bear < threshold:
        return max_alloc
    else:
        return max_alloc - (max_alloc - min_alloc) * ((p_bear - threshold) / (1 - threshold))

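###############################################
# (Hedged example) HMM helpers on a synthetic risk series
###############################################
# Illustration only: the two-regime composite-risk series below is simulated
# (its values and regime means are made up), purely to show how
# fit_hmm_on_series, compute_bear_probability and regime_allocation_from_hmm
# fit together.

def demo_hmm_regime():
    rng = np.random.default_rng(1)
    calm = rng.normal(0.25, 0.05, 18)      # assumed low-risk regime
    stressed = rng.normal(0.75, 0.05, 6)   # assumed high-risk regime
    risk_series = np.clip(np.concatenate([calm, stressed]), 0, 1)

    model, bear_state = fit_hmm_on_series(risk_series)
    p_bear = compute_bear_probability(model, bear_state, risk_series[-1])
    target_alloc = regime_allocation_from_hmm(p_bear, max_alloc=1.0,
                                              min_alloc=0.3, threshold=0.8)
    print(f"Bear probability: {p_bear:.2f}, target momentum allocation: {target_alloc:.2f}")
    return p_bear, target_alloc
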
###############################################
# 9. Simulate the Strategy with HMM-Based Regime Switching & CASH Adjustments
###############################################

def simulate_strategy_with_hmm(prices, macro,
                               eq_tickers, fi_tickers, alts_tickers,
                               initial_aum, start_date, end_date,
                               rebalance_period, rebalance_ratio,
                               lookback_period, metric_type,
                               internal_rebalance_ratios,
                               macro_params,
                               max_alloc=1.0, min_alloc=0.3,
                               hmm_window=24):
    """
    Runs the simulation using an HMM on the composite risk signal.

    This updated version pre-populates the composite risk series using macro data
    from the earliest date when all three indicators are available. Additionally,
    it records the exponentially smoothed values for VIX, ConsumerConfidence,
    and IGSpreads.
    """
    tickers = eq_tickers + fi_tickers + alts_tickers
    obs_dates = get_observation_dates(start_date, end_date, rebalance_period)
    results = []

    # -------------------------------
    # Pre-populate composite risk series
    # -------------------------------
    valid_macro = macro.dropna(subset=['VIX', 'ConsumerConfidence', 'IGSpreads'])
    earliest_macro_date = valid_macro.index.min()
    prepop_end_date = start_date - relativedelta(months=rebalance_period)
    prepop_dates = get_observation_dates(earliest_macro_date, prepop_end_date, rebalance_period)

    composite_risk_series = []
    for d in prepop_dates:
        try:
            vix = macro['VIX'].asof(d)
            cc = macro['ConsumerConfidence'].asof(d)
            ig = macro['IGSpreads'].asof(d)
        except Exception:
            continue
        composite_risk = compute_composite_risk(vix, cc, ig, macro_params)
        composite_risk_series.append(composite_risk)

    # -------------------------------
    # Initialize portfolio and record initial state
    # -------------------------------
    portfolio = initialize_portfolio(prices, start_date, tickers, initial_aum)
    CASH = 0.0
    portfolio_value = compute_portfolio_value(portfolio, prices, start_date)
    total_aum = portfolio_value + CASH
    prev_target_alloc = max_alloc
    prev_regime = 'risk-on'

    results.append({
        'Date': start_date,
        'Portfolio Value': portfolio_value,
        'Momentum AUM': portfolio_value,
        'CASH': CASH,
        'Total AUM': total_aum,
        'VIX': np.nan,
        'ConsumerConfidence': np.nan,
        'IGSpreads': np.nan,
        'VIX_smoothed': np.nan,
        'ConsumerConfidence_smoothed': np.nan,
        'IGSpreads_smoothed': np.nan,
        'Composite Risk': np.nan,
        'Bear Prob': np.nan,
        'Target Momentum Alloc': prev_target_alloc,
        'Regime': prev_regime,
        'Return': 0,
        **{f'qty_{ticker}': portfolio[ticker] for ticker in tickers},
        **{f'notional_{ticker}': portfolio[ticker] * prices.loc[start_date, ticker] for ticker in tickers},
    })

    prev_total_aum = total_aum

    # -------------------------------
    # Main simulation loop
    # -------------------------------
    for current_date in obs_dates[1:]:
        # Retrieve macro data using asof to get the nearest available value.
        try:
            vix = macro['VIX'].asof(current_date)
            cc = macro['ConsumerConfidence'].asof(current_date)
            ig = macro['IGSpreads'].asof(current_date)
        except Exception as e:
            raise ValueError(f"Error retrieving macro data for {current_date}: {e}")

        # Apply exponential smoothing with a fixed span.
        window = 10
        vix_smoothed = macro["VIX"].ewm(span=window, adjust=False).mean().asof(current_date)
        cc_smoothed = macro["ConsumerConfidence"].ewm(span=window, adjust=False).mean().asof(current_date)
        ig_smoothed = macro["IGSpreads"].ewm(span=window, adjust=False).mean().asof(current_date)

        # Compute composite risk from the smoothed indicator values.
        composite_risk = compute_composite_risk(vix_smoothed, cc_smoothed, ig_smoothed, macro_params)
        composite_risk_series.append(composite_risk)

        # Fit HMM if we have enough data.
        if len(composite_risk_series) >= hmm_window:
            window_data = np.array(composite_risk_series[-hmm_window:])
            if np.any(np.isnan(window_data)) or np.any(np.isinf(window_data)):
                window_data = np.nan_to_num(window_data, nan=0.5, posinf=0.5, neginf=0.5)
            model, bear_state = fit_hmm_on_series(window_data)
            p_bear = compute_bear_probability(model, bear_state, composite_risk)
        else:
            p_bear = 0.0  # Default to risk-on if insufficient data.

        # Map bear probability to target momentum allocation.
        target_alloc = regime_allocation_from_hmm(p_bear, max_alloc, min_alloc, threshold=0.8)
        regime = 'risk-on' if target_alloc >= max_alloc * 0.99 else 'risk-off'

        # Rebalance momentum portfolio.
        sorted_tickers, ranks, metrics = rank_assets(prices, current_date, tickers,
                                                     lookback_period, metric_type)
        portfolio, trades, pre_rebalance_value = rebalance_portfolio(
            portfolio, prices, current_date, tickers, sorted_tickers,
            internal_rebalance_ratios, rebalance_ratio)

        # Compute momentum portfolio value and total investment.
        mom_value = compute_portfolio_value(portfolio, prices, current_date)
        total_investment = mom_value + CASH

        # Determine desired momentum value and desired CASH.
        desired_mom_value = target_alloc * total_investment
        desired_cash = total_investment - desired_mom_value

        # Adjust portfolio and CASH.
        if mom_value > desired_mom_value:
            excess = mom_value - desired_mom_value
            for ticker in portfolio.keys():
                price = prices.loc[current_date, ticker]
                ticker_value = portfolio[ticker] * price
                sell_amount = (ticker_value / mom_value) * excess
                qty_to_sell = sell_amount / price
                portfolio[ticker] -= qty_to_sell
            CASH += excess
            adjustment_note = f"Sold excess {excess:.2f} to CASH."
        elif mom_value < desired_mom_value:
            shortage = desired_mom_value - mom_value
            if CASH >= shortage:
                for ticker in portfolio.keys():
                    price = prices.loc[current_date, ticker]
                    ticker_value = portfolio[ticker] * price
                    target_weight = ticker_value / mom_value if mom_value > 0 else 1 / len(portfolio)
                    invest_amount = target_weight * shortage
                    qty_to_buy = invest_amount / price
                    portfolio[ticker] += qty_to_buy
                CASH -= shortage
                adjustment_note = f"Bought into portfolio using {shortage:.2f} CASH."
            else:
                invest_amount = CASH
                for ticker in portfolio.keys():
                    price = prices.loc[current_date, ticker]
                    ticker_value = portfolio[ticker] * price
                    target_weight = ticker_value / mom_value if mom_value > 0 else 1 / len(portfolio)
                    qty_to_buy = (target_weight * invest_amount) / price
                    portfolio[ticker] += qty_to_buy
                CASH = 0
                adjustment_note = "Partial buy using all available CASH."
        else:
            adjustment_note = "No cash adjustment needed."

        mom_value = compute_portfolio_value(portfolio, prices, current_date)
        total_aum = mom_value + CASH
        ret = (total_aum - prev_total_aum) / prev_total_aum
        prev_total_aum = total_aum

        row = {
            'Date': current_date,
            'Portfolio Value': mom_value,
            'Momentum AUM': mom_value,
            'CASH': CASH,
            'Total AUM': total_aum,
            'Pre-Rebalance Value': pre_rebalance_value,
            'VIX': vix,
            'ConsumerConfidence': cc,
            'IGSpreads': ig,
            'VIX_smoothed': vix_smoothed,
            'ConsumerConfidence_smoothed': cc_smoothed,
            'IGSpreads_smoothed': ig_smoothed,
            'Composite Risk': composite_risk,
            'Bear Prob': p_bear,
            'Target Momentum Alloc': target_alloc,
            'Regime': regime,
            'Adjustment Note': adjustment_note,
            'Return': ret
        }
        for ticker in tickers:
            row[f'qty_{ticker}'] = portfolio[ticker]
            row[f'notional_{ticker}'] = portfolio[ticker] * prices.loc[current_date, ticker]
            row[f'weight_{ticker}'] = (portfolio[ticker] * prices.loc[current_date, ticker]) / mom_value if mom_value > 0 else np.nan
            row[f'rank_{ticker}'] = ranks.get(ticker, np.nan)
            row[f'metric_{ticker}'] = metrics.get(ticker, np.nan)
            row[f'trade_{ticker}'] = trades.get(ticker, 0)

        results.append(row)

    result_df = pd.DataFrame(results)
    result_df.set_index('Date', inplace=True)

    diag_columns = ['Total AUM', 'Momentum AUM', 'CASH',
                    'VIX', 'ConsumerConfidence', 'IGSpreads',
                    'VIX_smoothed', 'ConsumerConfidence_smoothed', 'IGSpreads_smoothed',
                    'Composite Risk', 'Bear Prob', 'Target Momentum Alloc',
                    'Regime', 'Adjustment Note', 'Return']
    for prefix in ['qty_', 'notional_', 'weight_', 'rank_', 'metric_', 'trade_']:
        diag_columns.extend([f'{prefix}{ticker}' for ticker in tickers])
    result_df = result_df[diag_columns]

    return result_df

###############################################
# 10. Main – Example Usage
###############################################

if __name__ == '__main__':
    # Define asset tickers.
    eq_tickers = ['SPY US Equity']
    fi_tickers = ['TLT US Equity', 'HYG US Equity']
    alts_tickers = ['GLD US Equity', 'SHV US Equity', 'VNQ US Equity']

    initial_aum = 100e6   # 100 million
    start_date = pd.to_datetime('2012-01-01')
    end_date = pd.to_datetime('2025-02-01')
    rebalance_period = 1     # monthly (or adjust as desired)
    rebalance_ratio = 0.2    # 20% of current momentum AUM rebalanced each period
    lookback_period = 6      # 6-month lookback
    metric_type = 'simple'

    # One ratio per momentum rank (buys at the top, sells at the bottom).
    internal_rebalance_ratios = [0.7, 0.3, 0, 0, -0.3, -0.7]

    # Macro parameters (scaling bounds for each indicator).
    macro_params = {
        'VIX': (10, 40),
        'ConsumerConfidence': (45, 130),
        'IGSpreads': (100, 200)
    }

    # File paths.
    price_filepath = r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\Codes\Historic Prices.xlsx"
    macro_filepath = r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\macro_indic.xlsx"

    prices = load_price_data(price_filepath)
    macro = load_macro_data(macro_filepath)

    # Set hmm_window to 24 months (or adjust as needed).
    result_df = simulate_strategy_with_hmm(prices, macro,
                                           eq_tickers, fi_tickers, alts_tickers,
                                           initial_aum, start_date, end_date,
                                           rebalance_period, rebalance_ratio,
                                           lookback_period, metric_type,
                                           internal_rebalance_ratios,
                                           macro_params,
                                           max_alloc=1.0, min_alloc=0.5,
                                           hmm_window=24)

    pd.set_option('display.float_format', lambda x: f'{x:,.2f}')
    # print(result_df)
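
    # Hedged example (illustration only): a few summary statistics on the
    # simulated track record, assuming the 'Return' and 'Total AUM' columns
    # produced by simulate_strategy_with_hmm above and monthly observations.
    total_aum_series = result_df['Total AUM']
    n_years = (total_aum_series.index[-1] - total_aum_series.index[0]).days / 365.25
    cagr = (total_aum_series.iloc[-1] / total_aum_series.iloc[0]) ** (1 / n_years) - 1
    max_drawdown = (total_aum_series / total_aum_series.cummax() - 1).min()
    ann_vol = result_df['Return'].iloc[1:].std() * np.sqrt(12)   # assumes monthly rebalancing
    print(f"CAGR: {cagr:.2%} | Max drawdown: {max_drawdown:.2%} | Ann. vol: {ann_vol:.2%}")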