import streamlit as st import pandas as pd import numpy as np import plotly.express as px import plotly.graph_objects as go from pathlib import Path import json from datetime import datetime, timedelta from typing import List, Dict, Tuple, Optional, Any, Callable, T import time import threading from concurrent.futures import ThreadPoolExecutor, as_completed import yfinance as yf import requests from requests.adapters import HTTPAdapter from urllib3.util.retry import Retry import os import sys import logging import traceback from dotenv import load_dotenv import re from ETF_Portal.services.drip_service import DRIPService, DripConfig from ETF_Portal.services.etf_selection_service import ETFSelectionService, InvestmentGoal, RiskTolerance from ETF_Portal.services.nav_erosion_service import NavErosionService from ETF_Portal.services.data_service import DataService # Load environment variables load_dotenv(override=True) # Force reload of environment variables # Configure logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) # Global settings USE_FMP_API = True # Default to using FMP API if available # FMP API configuration FMP_API_KEY = os.getenv('FMP_API_KEY') if not FMP_API_KEY: logger.warning("FMP_API_KEY not found in environment variables") logger.warning("Current environment variables: %s", dict(os.environ)) logger.warning("Current working directory: %s", os.getcwd()) logger.warning("Files in current directory: %s", os.listdir('.')) if os.path.exists('.env'): logger.warning(".env file exists") with open('.env', 'r') as f: logger.warning("Contents of .env file: %s", f.read()) else: logger.warning(".env file does not exist") else: logger.info("FMP_API_KEY loaded successfully") # Mask the API key for security in logs masked_key = FMP_API_KEY[:4] + '*' * (len(FMP_API_KEY) - 8) + FMP_API_KEY[-4:] logger.info("FMP_API_KEY (masked): %s", masked_key) FMP_BASE_URL = "https://financialmodelingprep.com/api/v3" def test_fmp_data_fetching(): 
"""Test FMP API data fetching with detailed logging.""" try: logger.info("=== Starting FMP API Test ===") logger.info(f"FMP API Key available: {bool(FMP_API_KEY)}") logger.info(f"FMP API enabled: {USE_FMP_API}") # Test a few high-yield ETFs test_tickers = ["JEPI", "JEPQ", "QYLD"] for ticker in test_tickers: logger.info(f"\nTesting {ticker}:") data = fetch_etf_data_fmp(ticker) if data: logger.info(f"Successfully fetched data for {ticker}") logger.info(f"Data source: {data.get('data_source', 'Not specified')}") logger.info(f"Raw data: {data.get('raw_data', 'No raw data')}") else: logger.error(f"Failed to fetch data for {ticker}") logger.info("=== FMP API Test Complete ===") except Exception as e: logger.error(f"Error in FMP API test: {str(e)}") logger.error(traceback.format_exc()) # High-yield ETFs reference data HIGH_YIELD_ETFS = { "MSTY": {"expected_yield": 125.0, "frequency": "Monthly"}, # 125% "SMCY": {"expected_yield": 100.0, "frequency": "Monthly"}, # 100% "TSLY": {"expected_yield": 85.0, "frequency": "Monthly"}, # 85% "NVDY": {"expected_yield": 75.0, "frequency": "Monthly"}, # 75% "ULTY": {"expected_yield": 70.0, "frequency": "Monthly"}, # 70% "JEPQ": {"expected_yield": 9.5, "frequency": "Monthly"}, # 9.5% "JEPI": {"expected_yield": 7.8, "frequency": "Monthly"}, # 7.8% "XYLD": {"expected_yield": 12.0, "frequency": "Monthly"}, # 12.0% "QYLD": {"expected_yield": 12.0, "frequency": "Monthly"}, # 12.0% "RYLD": {"expected_yield": 12.0, "frequency": "Monthly"} # 12.0% } def calculate_erosion_risk(yield_pct: float) -> Dict[str, float]: """ Calculate erosion risk based on yield percentage. Higher yields have higher erosion risk. 
Args: yield_pct: Yield percentage Returns: Dictionary with NAV and yield erosion risk scores (0-9) """ # Base erosion risk calculation if yield_pct >= 100: # Ultra high yield (100%+) nav_risk = 9 yield_risk = 9 elif yield_pct >= 50: # Very high yield (50-100%) nav_risk = 8 yield_risk = 8 elif yield_pct >= 25: # High yield (25-50%) nav_risk = 7 yield_risk = 7 elif yield_pct >= 15: # Medium-high yield (15-25%) nav_risk = 6 yield_risk = 6 elif yield_pct >= 10: # Medium yield (10-15%) nav_risk = 5 yield_risk = 5 elif yield_pct >= 5: # Medium-low yield (5-10%) nav_risk = 4 yield_risk = 4 elif yield_pct >= 3: # Low yield (3-5%) nav_risk = 3 yield_risk = 3 elif yield_pct >= 1: # Very low yield (1-3%) nav_risk = 2 yield_risk = 2 else: # Ultra low yield (<1%) nav_risk = 1 yield_risk = 1 return { "nav_risk": nav_risk, "yield_risk": yield_risk } def calculate_etf_metrics(ticker: str, price_data: pd.DataFrame, dividend_data: pd.DataFrame) -> Dict[str, Any]: """ Calculate ETF metrics based on available data. 
Args: ticker: ETF ticker price_data: DataFrame with price history dividend_data: DataFrame with dividend history Returns: Dictionary with calculated metrics """ metrics = { "Ticker": ticker, "Yield (%)": 0.0, "Price": 0.0, "volatility": 0.0, "sharpe_ratio": 0.0, "sortino_ratio": 0.0, "correlation": 0.0, "payout_ratio": 0.0, "score": 0.0, "Risk Level": "Unknown", "missing_metrics": [] } try: # Get current price from price data if not price_data.empty: metrics["Price"] = price_data["close"].iloc[-1] else: metrics["missing_metrics"].append("Price") # Calculate yield if dividend data is available if not dividend_data.empty and metrics["Price"] > 0: # Convert date column to datetime if it's not already dividend_data["date"] = pd.to_datetime(dividend_data["date"]) # Get dividends from the last 12 months one_year_ago = pd.Timestamp.now() - pd.Timedelta(days=365) recent_dividends = dividend_data[dividend_data["date"] >= one_year_ago] if not recent_dividends.empty: # Calculate TTM dividend ttm_dividend = recent_dividends["dividend"].sum() # Calculate annual yield metrics["Yield (%)"] = (ttm_dividend / metrics["Price"]) * 100 # For high-yield ETFs, use the expected yield if available if ticker in HIGH_YIELD_ETFS: metrics["Yield (%)"] = HIGH_YIELD_ETFS[ticker]["expected_yield"] logger.info(f"Calculated yield for {ticker}: {metrics['Yield (%)']:.2f}% (TTM dividend: ${ttm_dividend:.2f}, Price: ${metrics['Price']:.2f})") else: logger.warning(f"No recent dividends found for {ticker}") metrics["missing_metrics"].append("Yield (%)") else: metrics["missing_metrics"].append("Yield (%)") # Calculate volatility if price data is available if len(price_data) > 1: returns = price_data["close"].pct_change().dropna() metrics["volatility"] = returns.std() * np.sqrt(252) * 100 # Annualized volatility else: metrics["missing_metrics"].append("volatility") # Calculate Sharpe ratio if we have returns and risk-free rate if len(price_data) > 1: risk_free_rate = 0.05 # Assuming 5% risk-free rate 
excess_returns = returns - (risk_free_rate / 252) if excess_returns.std() != 0: metrics["sharpe_ratio"] = (excess_returns.mean() / excess_returns.std()) * np.sqrt(252) else: metrics["missing_metrics"].append("sharpe_ratio") # Calculate Sortino ratio if we have returns if len(price_data) > 1: downside_returns = returns[returns < 0] if len(downside_returns) > 0 and downside_returns.std() != 0: metrics["sortino_ratio"] = (returns.mean() / downside_returns.std()) * np.sqrt(252) else: metrics["missing_metrics"].append("sortino_ratio") # Calculate erosion risk based on yield erosion_risk = calculate_erosion_risk(metrics["Yield (%)"]) metrics["nav_erosion_risk"] = erosion_risk["nav_risk"] # Categorize risk based on available metrics metrics["Risk Level"] = categorize_etf_risk(metrics) # Calculate overall score metrics["score"] = calculate_etf_score(metrics) logger.info(f"Calculated metrics for {ticker}: {metrics}") return metrics except Exception as e: logger.error(f"Error calculating metrics for {ticker}: {str(e)}") logger.error(traceback.format_exc()) return metrics def categorize_etf_risk(metrics: Dict[str, Any]) -> str: """ Categorize ETF risk based on available metrics. 
Args: metrics: Dictionary with ETF metrics Returns: Risk category: "Low", "Medium", or "High" """ try: # Initialize risk score risk_score = 0 available_metrics = 0 # Yield-based risk (higher yield = higher risk) if "Yield (%)" not in metrics["missing_metrics"]: if metrics["Yield (%)"] > 10: risk_score += 3 elif metrics["Yield (%)"] > 6: risk_score += 2 else: risk_score += 1 available_metrics += 1 # Volatility-based risk if "volatility" not in metrics["missing_metrics"]: if metrics["volatility"] > 20: risk_score += 3 elif metrics["volatility"] > 15: risk_score += 2 else: risk_score += 1 available_metrics += 1 # Sharpe ratio-based risk (lower Sharpe = higher risk) if "sharpe_ratio" not in metrics["missing_metrics"]: if metrics["sharpe_ratio"] < 0.5: risk_score += 3 elif metrics["sharpe_ratio"] < 1.0: risk_score += 2 else: risk_score += 1 available_metrics += 1 # Sortino ratio-based risk (lower Sortino = higher risk) if "sortino_ratio" not in metrics["missing_metrics"]: if metrics["sortino_ratio"] < 0.5: risk_score += 3 elif metrics["sortino_ratio"] < 1.0: risk_score += 2 else: risk_score += 1 available_metrics += 1 # Calculate average risk score if available_metrics > 0: avg_risk_score = risk_score / available_metrics if avg_risk_score > 2.5: return "High" elif avg_risk_score > 1.5: return "Medium" else: return "Low" # If no metrics available, use yield as fallback if metrics["Yield (%)"] > 10: return "High" elif metrics["Yield (%)"] > 6: return "Medium" else: return "Low" except Exception as e: logger.error(f"Error categorizing ETF risk: {str(e)}") return "Unknown" def calculate_etf_score(metrics: Dict[str, Any]) -> float: """ Calculate overall ETF score based on available metrics. 
Args: metrics: Dictionary with ETF metrics Returns: Overall score (0-100) """ try: score = 0 available_metrics = 0 # Yield score (0-25 points) if "Yield (%)" not in metrics["missing_metrics"]: if metrics["Yield (%)"] > 10: score += 25 elif metrics["Yield (%)"] > 6: score += 20 elif metrics["Yield (%)"] > 3: score += 15 else: score += 10 available_metrics += 1 # Volatility score (0-25 points) if "volatility" not in metrics["missing_metrics"]: if metrics["volatility"] < 10: score += 25 elif metrics["volatility"] < 15: score += 20 elif metrics["volatility"] < 20: score += 15 else: score += 10 available_metrics += 1 # Sharpe ratio score (0-25 points) if "sharpe_ratio" not in metrics["missing_metrics"]: if metrics["sharpe_ratio"] > 1.5: score += 25 elif metrics["sharpe_ratio"] > 1.0: score += 20 elif metrics["sharpe_ratio"] > 0.5: score += 15 else: score += 10 available_metrics += 1 # Sortino ratio score (0-25 points) if "sortino_ratio" not in metrics["missing_metrics"]: if metrics["sortino_ratio"] > 1.5: score += 25 elif metrics["sortino_ratio"] > 1.0: score += 20 elif metrics["sortino_ratio"] > 0.5: score += 15 else: score += 10 available_metrics += 1 # Calculate final score if available_metrics > 0: return score / available_metrics return 0 except Exception as e: logger.error(f"Error calculating ETF score: {str(e)}") return 0 def calculate_correlation_matrix(price_data_dict: Dict[str, pd.DataFrame]) -> pd.DataFrame: """ Calculate correlation matrix between ETFs. 
Args: price_data_dict: Dictionary of price DataFrames for each ETF Returns: DataFrame with correlation matrix """ try: # Create a DataFrame with returns for all ETFs returns_df = pd.DataFrame() for ticker, price_data in price_data_dict.items(): if len(price_data) > 1: returns = price_data["close"].pct_change().dropna() returns_df[ticker] = returns if returns_df.empty: logger.warning("No valid price data for correlation calculation") return pd.DataFrame() # Calculate correlation matrix corr_matrix = returns_df.corr() logger.info(f"Correlation matrix calculated:\n{corr_matrix}") return corr_matrix except Exception as e: logger.error(f"Error calculating correlation matrix: {str(e)}") logger.error(traceback.format_exc()) return pd.DataFrame() def calculate_etf_risk_score(etf: Dict[str, Any]) -> float: """ Calculate a comprehensive risk score for an ETF based on multiple metrics. Args: etf: Dictionary containing ETF metrics Returns: float: Risk score (0-100, higher means higher risk) """ try: score = 0 metrics_used = 0 # Primary Metrics (60% of total score) # 1. Volatility (20%) if 'volatility' in etf: volatility = etf['volatility'] if volatility < 10: score += 20 elif volatility < 15: score += 15 elif volatility < 20: score += 10 else: score += 5 metrics_used += 1 # 2. Yield (20%) if 'yield' in etf: yield_value = etf['yield'] if yield_value < 3: score += 5 elif yield_value < 6: score += 10 elif yield_value < 10: score += 15 else: score += 20 metrics_used += 1 # 3. Sharpe/Sortino Ratio (20%) if 'sharpe_ratio' in etf: sharpe = etf['sharpe_ratio'] if sharpe > 1.5: score += 5 elif sharpe > 1.0: score += 10 elif sharpe > 0.8: score += 15 else: score += 20 metrics_used += 1 # Secondary Metrics (40% of total score) # 1. Dividend Growth (10%) if 'dividend_growth' in etf: growth = etf['dividend_growth'] if growth > 10: score += 5 elif growth > 5: score += 7 elif growth > 0: score += 10 else: score += 15 metrics_used += 1 # 2. 
Payout Ratio (10%) if 'payout_ratio' in etf: ratio = etf['payout_ratio'] if ratio < 40: score += 5 elif ratio < 60: score += 7 elif ratio < 80: score += 10 else: score += 15 metrics_used += 1 # 3. Expense Ratio (10%) if 'expense_ratio' in etf: ratio = etf['expense_ratio'] if ratio < 0.2: score += 5 elif ratio < 0.4: score += 7 elif ratio < 0.6: score += 10 else: score += 15 metrics_used += 1 # 4. AUM/Volume (10%) if 'aum' in etf: aum = etf['aum'] if aum > 5e9: # > $5B score += 5 elif aum > 1e9: # > $1B score += 7 elif aum > 500e6: # > $500M score += 10 else: score += 15 metrics_used += 1 # Normalize score based on available metrics if metrics_used > 0: return score / metrics_used return 50 # Default middle score if no metrics available except Exception as e: logger.error(f"Error calculating ETF risk score: {str(e)}") return 50 def optimize_portfolio_allocation(etf_metrics: List[Dict[str, Any]], risk_tolerance: str, correlation_matrix: pd.DataFrame) -> List[Dict[str, Any]]: """ Optimize portfolio allocation based on risk tolerance and ETF metrics. 
Args: etf_metrics: List of ETF metrics dictionaries risk_tolerance: Risk tolerance level ("Conservative", "Moderate", "Aggressive") correlation_matrix: Correlation matrix between ETFs Returns: List of dictionaries with ETF tickers and their allocations """ try: logger.info(f"Optimizing portfolio allocation for {risk_tolerance} risk tolerance") logger.info(f"ETF metrics: {etf_metrics}") # Sort ETFs by yield (higher yield = higher risk) sorted_etfs = sorted(etf_metrics, key=lambda x: x.get('Yield (%)', 0), reverse=True) logger.info(f"Sorted ETFs by yield: {[etf['Ticker'] for etf in sorted_etfs]}") # Calculate base allocations based on risk tolerance num_etfs = len(sorted_etfs) if num_etfs == 0: return [] if risk_tolerance == "Conservative": # For conservative, allocate more to lower yielding ETFs # This naturally requires more capital for the same income base_allocations = [] remaining_alloc = 100 for i in range(num_etfs): if i < num_etfs - 1: # Allocate more to lower yielding ETFs alloc = remaining_alloc * 0.4 # 40% of remaining base_allocations.append(alloc) remaining_alloc -= alloc else: # Last ETF gets remaining allocation base_allocations.append(remaining_alloc) elif risk_tolerance == "Moderate": # For moderate, allocate more to middle yielding ETFs # This naturally requires medium capital for the same income base_allocations = [] remaining_alloc = 100 for i in range(num_etfs): if i < num_etfs - 1: # Allocate more to middle yielding ETFs alloc = remaining_alloc * 0.5 # 50% of remaining base_allocations.append(alloc) remaining_alloc -= alloc else: # Last ETF gets remaining allocation base_allocations.append(remaining_alloc) else: # Aggressive # For aggressive, allocate more to higher yielding ETFs # This naturally requires less capital for the same income base_allocations = [] remaining_alloc = 100 for i in range(num_etfs): if i < num_etfs - 1: # Allocate more to higher yielding ETFs alloc = remaining_alloc * 0.6 # 60% of remaining base_allocations.append(alloc) 
remaining_alloc -= alloc else: # Last ETF gets remaining allocation base_allocations.append(remaining_alloc) # Create final allocation list final_allocations = [] for etf, allocation in zip(sorted_etfs, base_allocations): final_allocations.append({ "ticker": etf["Ticker"], "allocation": allocation # Already in percentage }) logger.info(f"Final allocations: {final_allocations}") return final_allocations except Exception as e: logger.error(f"Error in optimize_portfolio_allocation: {str(e)}") logger.error(traceback.format_exc()) return [] def adjust_allocations_for_correlation( allocations: Dict[str, float], correlation_matrix: pd.DataFrame ) -> Dict[str, float]: """ Adjust allocations to reduce correlation between ETFs. Args: allocations: Dictionary with current allocations correlation_matrix: Correlation matrix between ETFs Returns: Dictionary with adjusted allocations """ try: adjusted_allocations = allocations.copy() # Get highly correlated pairs (correlation > 0.7) high_corr_pairs = [] for i in range(len(correlation_matrix.columns)): for j in range(i + 1, len(correlation_matrix.columns)): ticker1 = correlation_matrix.columns[i] ticker2 = correlation_matrix.columns[j] if abs(correlation_matrix.iloc[i, j]) > 0.7: high_corr_pairs.append((ticker1, ticker2)) # Adjust allocations for highly correlated pairs for ticker1, ticker2 in high_corr_pairs: if ticker1 in adjusted_allocations and ticker2 in adjusted_allocations: # Reduce allocation to the ETF with lower score if adjusted_allocations[ticker1] > adjusted_allocations[ticker2]: reduction = adjusted_allocations[ticker1] * 0.1 # Reduce by 10% adjusted_allocations[ticker1] -= reduction adjusted_allocations[ticker2] += reduction else: reduction = adjusted_allocations[ticker2] * 0.1 # Reduce by 10% adjusted_allocations[ticker2] -= reduction adjusted_allocations[ticker1] += reduction logger.info(f"Adjusted allocations for correlation: {adjusted_allocations}") return adjusted_allocations except Exception as e: 
logger.error(f"Error adjusting allocations for correlation: {str(e)}") logger.error(traceback.format_exc()) return allocations def get_fmp_session(): """Create a session with retry logic for FMP API calls.""" session = requests.Session() retries = Retry(total=3, backoff_factor=0.5) session.mount('https://', HTTPAdapter(max_retries=retries)) return session def fetch_etf_data_fmp(ticker: str) -> Optional[Dict[str, Any]]: """ Fetch ETF data from Financial Modeling Prep API. Args: ticker: ETF ticker symbol Returns: Dictionary with ETF data or None if failed """ try: if not FMP_API_KEY: logger.warning("FMP API key not configured in environment variables") st.warning("FMP API key not found in environment variables. Some features may be limited.") return None session = get_fmp_session() # Get profile data for current price profile_url = f"{FMP_BASE_URL}/profile/{ticker}?apikey={FMP_API_KEY}" logger.info(f"[FMP API] Making profile request to: {profile_url}") profile_response = session.get(profile_url) st.session_state.api_calls += 1 logger.info(f"[FMP API] Profile response status: {profile_response.status_code}") logger.info(f"[FMP API] Profile response content: {profile_response.text[:500]}...") # Log first 500 chars if profile_response.status_code != 200: logger.error(f"[FMP API] Error for {ticker}: {profile_response.status_code}") logger.error(f"[FMP API] Response content: {profile_response.text}") return None profile_data = profile_response.json() logger.info(f"[FMP API] Profile data for {ticker}: {profile_data}") if not profile_data or not isinstance(profile_data, list) or len(profile_data) == 0: logger.warning(f"[FMP API] No profile data found for {ticker}") return None profile = profile_data[0] current_price = float(profile.get('price', 0)) if current_price <= 0: logger.error(f"[FMP API] Invalid price for {ticker}: {current_price}") return None # Get dividend history dividend_url = f"{FMP_BASE_URL}/historical-price-full/stock_dividend/{ticker}?apikey={FMP_API_KEY}" 
logger.info(f"[FMP API] Making dividend request to: {dividend_url}") dividend_response = session.get(dividend_url) st.session_state.api_calls += 1 logger.info(f"[FMP API] Dividend response status: {dividend_response.status_code}") logger.info(f"[FMP API] Dividend response content: {dividend_response.text[:500]}...") # Log first 500 chars if dividend_response.status_code != 200: logger.error(f"[FMP API] Error for dividend data: {dividend_response.status_code}") logger.error(f"[FMP API] Response content: {dividend_response.text}") return None dividend_data = dividend_response.json() logger.info(f"[FMP API] Dividend data for {ticker}: {dividend_data}") if not dividend_data or "historical" not in dividend_data or not dividend_data["historical"]: logger.warning(f"[FMP API] No dividend history found for {ticker}") return None # Calculate TTM dividend dividends = pd.DataFrame(dividend_data["historical"]) dividends["date"] = pd.to_datetime(dividends["date"]) dividends = dividends.sort_values("date") # Get dividends in the last 12 months one_year_ago = pd.Timestamp.now() - pd.Timedelta(days=365) recent_dividends = dividends[dividends["date"] >= one_year_ago] if recent_dividends.empty: logger.warning(f"[FMP API] No recent dividends found for {ticker}") return None # Calculate TTM dividend ttm_dividend = recent_dividends["dividend"].sum() # Calculate yield yield_pct = (ttm_dividend / current_price) * 100 logger.info(f"[FMP API] Calculated yield for {ticker}: {yield_pct:.2f}% (TTM dividend: ${ttm_dividend:.2f}, Price: ${current_price:.2f})") # For high-yield ETFs, verify the yield is reasonable if ticker in HIGH_YIELD_ETFS: expected_yield = HIGH_YIELD_ETFS[ticker]["expected_yield"] if yield_pct < expected_yield * 0.5: # If yield is less than 50% of expected logger.error(f"[FMP API] Calculated yield {yield_pct:.2f}% for {ticker} is much lower than expected {expected_yield}%") logger.error(f"[FMP API] TTM dividend: ${ttm_dividend:.2f}") logger.error(f"[FMP API] Current price: 
${current_price:.2f}") logger.error(f"[FMP API] Recent dividends:\n{recent_dividends}") # Determine distribution period if len(recent_dividends) >= 2: intervals = recent_dividends["date"].diff().dt.days.dropna() avg_interval = intervals.mean() if avg_interval <= 45: dist_period = "Monthly" elif avg_interval <= 100: dist_period = "Quarterly" elif avg_interval <= 200: dist_period = "Semi-Annually" else: dist_period = "Annually" else: dist_period = "Unknown" etf_data = { "Ticker": ticker, "Price": current_price, "Yield (%)": yield_pct, "Distribution Period": dist_period, "Risk Level": "High" if ticker in HIGH_YIELD_ETFS else "Moderate", "data_source": "FMP API", # Add data source identifier "raw_data": { # Store raw data for debugging "profile": profile, "dividend_history": dividend_data["historical"][:5] # Store first 5 dividend records } } logger.info(f"[FMP API] Final data for {ticker}: {etf_data}") return etf_data except Exception as e: logger.error(f"[FMP API] Error fetching data for {ticker}: {str(e)}") logger.error(traceback.format_exc()) return None def fetch_etf_data_yfinance(ticker: str) -> Optional[Dict[str, Any]]: """ Fetch ETF data from yfinance as fallback. 
Args: ticker: ETF ticker symbol Returns: Dictionary with ETF data or None if failed """ try: logger.info(f"Fetching yfinance data for {ticker}") etf = yf.Ticker(ticker) info = etf.info # Get the most recent dividend yield if 'dividendYield' in info and info['dividendYield'] is not None: yield_pct = info['dividendYield'] * 100 logger.info(f"Found dividend yield in yfinance for {ticker}: {yield_pct:.2f}%") else: # Try to calculate from dividend history hist = etf.history(period="1y") if not hist.empty and 'Dividends' in hist.columns: annual_dividend = hist['Dividends'].sum() current_price = info.get('regularMarketPrice', 0) yield_pct = (annual_dividend / current_price) * 100 if current_price > 0 else 0 logger.info(f"Calculated yield from history for {ticker}: {yield_pct:.2f}%") else: yield_pct = 0 logger.warning(f"No yield data found for {ticker} in yfinance") # Get current price current_price = info.get('regularMarketPrice', 0) if current_price <= 0: current_price = info.get('regularMarketPreviousClose', 0) logger.warning(f"Using previous close price for {ticker}: {current_price}") etf_data = { "Ticker": ticker, "Price": current_price, "Yield (%)": yield_pct, "Risk Level": "High", # Default for high-yield ETFs "data_source": "yfinance" # Add data source identifier } logger.info(f"yfinance data for {ticker}: {etf_data}") return etf_data except Exception as e: logger.error(f"Error fetching yfinance data for {ticker}: {str(e)}") return None def fetch_etf_data(tickers: List[str]) -> pd.DataFrame: """ Fetch ETF data using FMP API with yfinance fallback. Uses HIGH_YIELD_ETFS data only as a last resort. 
Args: tickers: List of ETF tickers Returns: DataFrame with ETF data """ try: data = {} cache_dir = Path("cache") cache_dir.mkdir(exist_ok=True) logger.info("=== Starting ETF data fetch ===") logger.info(f"Force refresh enabled: {st.session_state.get('force_refresh_data', False)}") logger.info(f"Cache directory: {cache_dir.absolute()}") logger.info(f"FMP API enabled: {USE_FMP_API}") logger.info(f"FMP API key available: {bool(FMP_API_KEY)}") for ticker in tickers: if not ticker: # Skip empty tickers continue logger.info(f"\n=== Processing {ticker} ===") # Check cache first if not forcing refresh cache_file = cache_dir / f"{ticker}_data.json" logger.info(f"Cache file path: {cache_file.absolute()}") logger.info(f"Cache file exists: {cache_file.exists()}") if not st.session_state.get("force_refresh_data", False) and cache_file.exists(): try: with open(cache_file, 'r') as f: cached_data = json.load(f) cache_time = datetime.fromisoformat(cached_data.get('timestamp', '2000-01-01')) cache_age = datetime.now() - cache_time logger.info(f"Cache age: {cache_age.total_seconds() / 3600:.2f} hours") if cache_age < timedelta(hours=24): logger.info(f"Using cached data for {ticker}") data[ticker] = cached_data['data'] continue else: logger.info(f"Cache expired for {ticker} (age: {cache_age.total_seconds() / 3600:.2f} hours)") except Exception as e: logger.warning(f"Error reading cache for {ticker}: {str(e)}") logger.warning(traceback.format_exc()) else: logger.info(f"No cache found or force refresh enabled for {ticker}") # Try FMP first if enabled if USE_FMP_API and FMP_API_KEY: logger.info(f"Attempting to fetch data from FMP API for {ticker}") etf_data = fetch_etf_data_fmp(ticker) if etf_data is not None: logger.info(f"Successfully fetched data from FMP API for {ticker}") # Cache the data try: cache_data = { 'timestamp': datetime.now().isoformat(), 'data': etf_data } with open(cache_file, 'w') as f: json.dump(cache_data, f) logger.info(f"Cached FMP data for {ticker}") except 
Exception as e: logger.warning(f"Error caching FMP data for {ticker}: {str(e)}") logger.warning(traceback.format_exc()) data[ticker] = etf_data st.session_state.api_calls += 1 logger.info(f"Total API calls: {st.session_state.api_calls}") continue else: logger.warning(f"FMP API fetch failed for {ticker}, falling back to yfinance") # If FMP fails, try yfinance logger.info(f"Fetching data from yfinance for {ticker}") etf_data = fetch_etf_data_yfinance(ticker) if etf_data is not None: logger.info(f"Successfully fetched data from yfinance for {ticker}") # Cache the data try: cache_data = { 'timestamp': datetime.now().isoformat(), 'data': etf_data } with open(cache_file, 'w') as f: json.dump(cache_data, f) logger.info(f"Cached yfinance data for {ticker}") except Exception as e: logger.warning(f"Error caching yfinance data for {ticker}: {str(e)}") logger.warning(traceback.format_exc()) data[ticker] = etf_data continue # Only use HIGH_YIELD_ETFS data if both FMP and yfinance failed if ticker in HIGH_YIELD_ETFS: logger.info(f"Using fallback data from HIGH_YIELD_ETFS for {ticker}") etf_data = { "Ticker": ticker, "Price": 25.0, # Default price for fallback "Yield (%)": HIGH_YIELD_ETFS[ticker]["expected_yield"], "Distribution Period": HIGH_YIELD_ETFS[ticker]["frequency"], "Risk Level": "High", "data_source": "HIGH_YIELD_ETFS" } data[ticker] = etf_data else: logger.error(f"Failed to fetch data for {ticker} from all sources") if not data: st.error("No ETF data could be fetched") return pd.DataFrame() df = pd.DataFrame(data.values()) # Validate the data if df.empty: st.error("No ETF data could be fetched") return pd.DataFrame() if (df["Price"] <= 0).any(): st.error("Some ETFs have invalid prices") return pd.DataFrame() if (df["Yield (%)"] <= 0).any(): st.warning("Some ETFs have zero or negative yields") # Log data sources used if "data_source" in df.columns: source_counts = df["data_source"].value_counts() logger.info(f"Data sources used:\n{source_counts}") logger.info(f"Final 
DataFrame:\n{df}") return df except Exception as e: st.error(f"Error fetching ETF data: {str(e)}") logger.error(f"Error in fetch_etf_data: {str(e)}") logger.error(traceback.format_exc()) return pd.DataFrame() def run_portfolio_simulation( tickers: List[str], weights: List[float], initial_investment: float, start_date: str, end_date: str, rebalance_frequency: str = 'monthly', use_fmp: bool = True ) -> Dict[str, Any]: """ Run portfolio simulation with the given parameters. Args: tickers: List of ETF tickers weights: List of portfolio weights initial_investment: Initial investment amount start_date: Start date for simulation end_date: End date for simulation rebalance_frequency: Frequency of rebalancing use_fmp: Whether to use FMP API for data Returns: Dictionary with simulation results """ try: # Validate inputs if not tickers or not weights: raise ValueError("No tickers or weights provided") if len(tickers) != len(weights): raise ValueError("Number of tickers must match number of weights") if not all(0 <= w <= 1 for w in weights): raise ValueError("Weights must be between 0 and 1") if sum(weights) != 1: raise ValueError("Weights must sum to 1") # Get historical data historical_data = {} for ticker in tickers: if use_fmp and FMP_API_KEY: data = fetch_etf_data_fmp(ticker) if data and 'historical' in data: historical_data[ticker] = data['historical'] else: logger.warning(f"Falling back to yfinance for {ticker}") data = fetch_etf_data_yfinance(ticker) if data and 'historical' in data: historical_data[ticker] = data['historical'] else: data = fetch_etf_data_yfinance(ticker) if data and 'historical' in data: historical_data[ticker] = data['historical'] if not historical_data: raise ValueError("No historical data available for any tickers") # Create portfolio DataFrame portfolio = pd.DataFrame() for ticker, data in historical_data.items(): portfolio[ticker] = data['close'] # Calculate portfolio returns portfolio_returns = portfolio.pct_change() portfolio_returns = 
portfolio_returns.fillna(0) # Calculate weighted returns weighted_returns = pd.DataFrame() for i, ticker in enumerate(tickers): weighted_returns[ticker] = portfolio_returns[ticker] * weights[i] portfolio_returns['portfolio'] = weighted_returns.sum(axis=1) # Calculate cumulative returns cumulative_returns = (1 + portfolio_returns).cumprod() # Calculate portfolio value portfolio_value = initial_investment * cumulative_returns['portfolio'] # Calculate metrics total_return = (portfolio_value.iloc[-1] / initial_investment) - 1 annual_return = (1 + total_return) ** (252 / len(portfolio_value)) - 1 volatility = portfolio_returns['portfolio'].std() * np.sqrt(252) sharpe_ratio = annual_return / volatility if volatility != 0 else 0 # Calculate drawdown rolling_max = portfolio_value.expanding().max() drawdown = (portfolio_value - rolling_max) / rolling_max max_drawdown = drawdown.min() return { 'portfolio_value': portfolio_value, 'returns': portfolio_returns, 'cumulative_returns': cumulative_returns, 'total_return': total_return, 'annual_return': annual_return, 'volatility': volatility, 'sharpe_ratio': sharpe_ratio, 'max_drawdown': max_drawdown, 'drawdown': drawdown } except Exception as e: logger.error(f"Error in portfolio simulation: {str(e)}") st.error(f"Error running portfolio simulation: {str(e)}") return None def format_large_number(value: float) -> str: """Format large numbers with K, M, B, T suffixes.""" if abs(value) >= 1e12: # Trillions return f"${value/1e12:.2f}T" elif abs(value) >= 1e9: # Billions return f"${value/1e9:.2f}B" elif abs(value) >= 1e6: # Millions return f"${value/1e6:.2f}M" elif abs(value) >= 1e3: # Thousands return f"${value/1e3:.2f}K" else: return f"${value:,.2f}" def portfolio_summary(final_alloc: pd.DataFrame) -> None: """ Display a summary of the portfolio allocation. 
def portfolio_summary(final_alloc: pd.DataFrame) -> None:
    """
    Display a summary of the portfolio allocation.

    Renders headline metrics, a pie chart of the allocation, and a
    formatted detail table in the current Streamlit container.

    Args:
        final_alloc: DataFrame containing the portfolio allocation. Expected
            columns include Ticker, Allocation (%), Yield (%), Price, Shares,
            Capital Allocated ($), Income Contributed ($), Risk Level.
    """
    if final_alloc is None or final_alloc.empty:
        st.warning("No portfolio data available.")
        return
    try:
        # Calculate key metrics
        total_capital = final_alloc["Capital Allocated ($)"].sum()
        total_income = final_alloc["Income Contributed ($)"].sum()
        # Weighted average yield (Allocation is in percent, hence / 100)
        weighted_yield = (final_alloc["Allocation (%)"] * final_alloc["Yield (%)"]).sum() / 100

        # Display metrics in columns
        col1, col2, col3 = st.columns(3)
        with col1:
            st.metric("Total Capital", format_large_number(total_capital))
        with col2:
            st.metric("Annual Income", format_large_number(total_income))
            st.metric("Monthly Income", format_large_number(total_income/12))
        with col3:
            st.metric("Average Yield", f"{weighted_yield:.2f}%")
            # ROBUSTNESS FIX: avoid ZeroDivisionError when no capital is
            # allocated (previously this aborted the whole summary).
            if total_capital > 0:
                st.metric("Effective Yield", f"{(total_income/total_capital*100):.2f}%")
            else:
                st.metric("Effective Yield", "N/A")

        # Display allocation chart
        fig = px.pie(
            final_alloc,
            values="Allocation (%)",
            names="Ticker",
            title="Portfolio Allocation by ETF",
            hover_data={
                "Ticker": True,
                "Allocation (%)": ":.2f",
                "Yield (%)": ":.2f",
                "Capital Allocated ($)": ":,.2f",
                "Income Contributed ($)": ":,.2f"
            }
        )
        st.plotly_chart(fig, use_container_width=True)

        # Display detailed allocation table
        st.subheader("Detailed Allocation")
        display_df = final_alloc.copy()
        display_df["Monthly Income"] = display_df["Income Contributed ($)"] / 12

        # Format large numbers in the display DataFrame
        display_df["Capital Allocated ($)"] = display_df["Capital Allocated ($)"].apply(format_large_number)
        display_df["Income Contributed ($)"] = display_df["Income Contributed ($)"].apply(format_large_number)
        display_df["Monthly Income"] = display_df["Monthly Income"].apply(format_large_number)

        # Ensure data_source column exists and rename it for display
        if "data_source" in display_df.columns:
            display_df = display_df.rename(columns={"data_source": "Data Source"})
        else:
            display_df["Data Source"] = "Unknown"

        # Select and order columns for display
        display_columns = [
            "Ticker", "Allocation (%)", "Yield (%)", "Price", "Shares",
            "Capital Allocated ($)", "Monthly Income", "Income Contributed ($)",
            "Risk Level", "Data Source"
        ]

        # Format the display
        st.dataframe(
            display_df[display_columns].style.format({
                "Allocation (%)": "{:.2f}%",
                "Yield (%)": "{:.2f}%",
                "Price": "${:,.2f}",
                "Shares": "{:,.4f}"
            }),
            column_config={
                "Ticker": st.column_config.TextColumn("Ticker", disabled=True),
                "Allocation (%)": st.column_config.NumberColumn(
                    "Allocation (%)",
                    min_value=0.0,
                    max_value=100.0,
                    step=0.1,
                    format="%.1f",
                    required=True
                ),
                "Yield (%)": st.column_config.TextColumn("Yield (%)", disabled=True),
                "Price": st.column_config.TextColumn("Price", disabled=True),
                "Shares": st.column_config.TextColumn("Shares", disabled=True),
                "Capital Allocated ($)": st.column_config.TextColumn("Capital Allocated ($)", disabled=True),
                "Monthly Income": st.column_config.TextColumn("Monthly Income", disabled=True),
                "Income Contributed ($)": st.column_config.TextColumn("Income Contributed ($)", disabled=True),
                "Risk Level": st.column_config.TextColumn("Risk Level", disabled=True),
                "Data Source": st.column_config.TextColumn("Data Source", disabled=True)
            },
            hide_index=True,
            use_container_width=True
        )
    except Exception as e:
        st.error(f"Error calculating portfolio summary: {str(e)}")
        logger.error(f"Error in portfolio_summary: {str(e)}")
        logger.error(traceback.format_exc())
def save_portfolio(portfolio_name: str, final_alloc: pd.DataFrame, mode: str, target: float) -> bool:
    """
    Save portfolio allocation to a JSON file.

    Args:
        portfolio_name: Name of the portfolio
        final_alloc: DataFrame containing portfolio allocation
        mode: Portfolio mode ("Income Target" or "Capital Target")
        target: Target value (income or capital)

    Returns:
        bool: True if save was successful, False otherwise
    """
    try:
        # Make sure the output directory exists before writing.
        out_dir = Path("portfolios")
        out_dir.mkdir(exist_ok=True)

        # One serializable record per ETF row in the allocation table.
        allocations = [
            {
                "ticker": row["Ticker"],
                "allocation": float(row["Allocation (%)"]),
                "yield": float(row["Yield (%)"]),
                "price": float(row["Price"]),
                "risk_level": row["Risk Level"],
            }
            for _, row in final_alloc.iterrows()
        ]

        payload = {
            "name": portfolio_name,
            "created_at": datetime.now().isoformat(),
            "mode": mode,
            "target": target,
            "allocations": allocations,
        }

        # Persist as pretty-printed JSON named after the portfolio.
        with open(out_dir / f"{portfolio_name}.json", 'w') as fh:
            json.dump(payload, fh, indent=2)
        return True
    except Exception as e:
        st.error(f"Error saving portfolio: {str(e)}")
        return False
def load_portfolio(portfolio_name: str) -> Tuple[Optional[pd.DataFrame], Optional[str], Optional[float]]:
    """
    Load portfolio allocation from a JSON file.

    Args:
        portfolio_name: Name of the portfolio to load

    Returns:
        Tuple containing:
            - DataFrame with portfolio allocation
            - Portfolio mode
            - Target value
        (None, None, None) when the portfolio is missing or unreadable.
    """
    try:
        source = Path("portfolios") / f"{portfolio_name}.json"
        if not source.exists():
            st.error(f"Portfolio '{portfolio_name}' not found.")
            return None, None, None

        with open(source, 'r') as fh:
            saved = json.load(fh)

        # Rebuild the allocation table using the display column names.
        frame = pd.DataFrame(saved["allocations"]).rename(columns={
            "allocation": "Allocation (%)",
            "yield": "Yield (%)",
            "price": "Price",
        })
        return frame, saved["mode"], saved["target"]
    except Exception as e:
        st.error(f"Error loading portfolio: {str(e)}")
        return None, None, None


def list_saved_portfolios() -> List[str]:
    """
    List all saved portfolios.

    Returns:
        List of portfolio names, sorted alphabetically.
    """
    try:
        store = Path("portfolios")
        if not store.exists():
            return []
        # Each saved portfolio is a <name>.json file; the stem is the name.
        return sorted(f.stem for f in store.glob("*.json"))
    except Exception as e:
        st.error(f"Error listing portfolios: {str(e)}")
        return []
def _apply_etf_allocations(df: pd.DataFrame, etf_allocations: List[Dict[str, Any]]) -> Optional[pd.DataFrame]:
    """Copy df and write per-ticker 'Allocation (%)' values from etf_allocations.

    Shared by allocate_for_income and allocate_for_capital, which previously
    duplicated this logic verbatim.

    Returns:
        The copy with allocations applied, or None if no allocation was set.
    """
    final_alloc = df.copy()
    if "Allocation (%)" not in final_alloc.columns:
        final_alloc["Allocation (%)"] = 0.0
    for alloc in etf_allocations:
        mask = final_alloc["Ticker"] == alloc["ticker"]
        if mask.any():
            final_alloc.loc[mask, "Allocation (%)"] = alloc["allocation"]  # Already in percentage
        else:
            logger.warning(f"Ticker {alloc['ticker']} not found in DataFrame")
    # Verify allocations are set
    if final_alloc["Allocation (%)"].sum() == 0:
        logger.error("No allocations were set")
        return None
    return final_alloc


def allocate_for_income(df: pd.DataFrame, target: float, etf_allocations: List[Dict[str, Any]]) -> pd.DataFrame:
    """
    Allocate portfolio for income target.

    Args:
        df: DataFrame with ETF data (must contain Ticker, Price, Yield (%))
        target: Monthly income target
        etf_allocations: List of {"ticker", "allocation"} dicts (percent)

    Returns:
        DataFrame with final allocation, or None on error.
    """
    try:
        final_alloc = _apply_etf_allocations(df, etf_allocations)
        if final_alloc is None:
            return None

        # Required capital follows from the blended (weighted) yield.
        monthly_income = target
        annual_income = monthly_income * 12
        weighted_yield = (final_alloc["Allocation (%)"] * final_alloc["Yield (%)"]).sum() / 100
        if weighted_yield == 0:
            logger.error("Weighted yield is zero")
            return None
        required_capital = annual_income / (weighted_yield / 100)

        # Derive dollars, shares and income per position.
        final_alloc["Capital Allocated ($)"] = (final_alloc["Allocation (%)"] / 100) * required_capital
        final_alloc["Shares"] = final_alloc["Capital Allocated ($)"] / final_alloc["Price"]
        final_alloc["Income Contributed ($)"] = (final_alloc["Capital Allocated ($)"] * final_alloc["Yield (%)"]) / 100

        # Sanity check: aggregate income should hit the annual target.
        total_income = final_alloc["Income Contributed ($)"].sum()
        if abs(total_income - annual_income) > 1.0:  # Allow for small rounding errors
            logger.warning(f"Total income ({total_income}) does not match target ({annual_income})")

        logger.info(f"Income allocation completed. Required capital: ${required_capital:,.2f}")
        logger.info(f"Final allocations:\n{final_alloc}")
        return final_alloc
    except Exception as e:
        logger.error(f"Error in income allocation: {str(e)}")
        logger.error(traceback.format_exc())
        return None


def allocate_for_capital(df: pd.DataFrame, initial_capital: float, etf_allocations: List[Dict[str, Any]]) -> pd.DataFrame:
    """
    Allocate portfolio for capital target.

    Args:
        df: DataFrame with ETF data (must contain Ticker, Price, Yield (%))
        initial_capital: Initial capital amount
        etf_allocations: List of {"ticker", "allocation"} dicts (percent)

    Returns:
        DataFrame with final allocation, or None on error.
    """
    try:
        final_alloc = _apply_etf_allocations(df, etf_allocations)
        if final_alloc is None:
            return None

        # Derive dollars, shares and income per position.
        final_alloc["Capital Allocated ($)"] = (final_alloc["Allocation (%)"] / 100) * initial_capital
        final_alloc["Shares"] = final_alloc["Capital Allocated ($)"] / final_alloc["Price"]
        final_alloc["Income Contributed ($)"] = (final_alloc["Capital Allocated ($)"] * final_alloc["Yield (%)"]) / 100

        # Sanity check: allocated capital should equal the requested capital.
        total_capital = final_alloc["Capital Allocated ($)"].sum()
        if abs(total_capital - initial_capital) > 1.0:  # Allow for small rounding errors
            logger.warning(f"Total capital ({total_capital}) does not match initial capital ({initial_capital})")

        logger.info(f"Capital allocation completed. Initial capital: ${initial_capital:,.2f}")
        logger.info(f"Final allocations:\n{final_alloc}")
        return final_alloc
    except Exception as e:
        logger.error(f"Error in capital allocation: {str(e)}")
        logger.error(traceback.format_exc())
        return None


def reset_simulation():
    """Reset all simulation data and state, then rerun the Streamlit app."""
    st.session_state.simulation_run = False
    st.session_state.df_data = None
    st.session_state.final_alloc = None
    st.session_state.mode = 'Capital Target'
    st.session_state.target = 0
    st.session_state.initial_capital = 0
    st.session_state.enable_drip = False
    st.session_state.enable_erosion = False
    st.rerun()


def test_fmp_connection() -> bool:
    """Test connection to FMP API.

    Returns True when a profile request for SPY returns HTTP 200.
    Increments st.session_state.api_calls as a side effect.
    """
    try:
        if not FMP_API_KEY:
            st.error("FMP API key not found in environment variables")
            return False
        session = get_fmp_session()
        test_url = f"{FMP_BASE_URL}/profile/SPY?apikey={FMP_API_KEY}"
        # SECURITY FIX: do not write the raw API key to the log (the module
        # header deliberately masks the key when logging it).
        logger.info(f"Making FMP API test call to {test_url.replace(FMP_API_KEY, '***')}")
        response = session.get(test_url)
        st.session_state.api_calls += 1
        logger.info(f"FMP API call count: {st.session_state.api_calls}")
        if response.status_code == 200:
            return True
        st.error(f"Failed to connect to FMP API: {response.status_code}")
        logger.error(f"FMP API test failed: {response.text}")
        return False
    except Exception as e:
        st.error(f"Error testing FMP connection: {str(e)}")
        logger.error(f"FMP API test error: {str(e)}")
        return False
def get_cache_stats() -> Dict[str, Any]:
    """
    Get statistics about the cache usage.

    Returns:
        Dictionary with keys:
            ticker_count: number of distinct tickers with cached files
            file_count: number of cache JSON files
            total_size_kb: combined size of those files in KB
    """
    empty_stats = {"ticker_count": 0, "file_count": 0, "total_size_kb": 0}
    try:
        cache_dir = Path("cache")
        if not cache_dir.exists():
            return empty_stats

        cache_files = list(cache_dir.glob("**/*.json"))
        # Filenames are assumed to look like <ticker>_<data_type>.json,
        # so the ticker is the first underscore-separated token of the stem.
        tickers = {file.stem.split('_')[0] for file in cache_files}
        total_size = sum(file.stat().st_size for file in cache_files)

        return {
            "ticker_count": len(tickers),
            "file_count": len(cache_files),
            "total_size_kb": total_size / 1024  # Convert to KB
        }
    except Exception as e:
        logger.error(f"Error getting cache stats: {str(e)}")
        return empty_stats


def clear_cache(ticker: Optional[str] = None) -> None:
    """
    Clear cache files for a specific ticker or all tickers.

    Args:
        ticker: Optional ticker symbol to clear cache for. If None, clears all cache.
    """
    try:
        cache_dir = Path("cache")
        if not cache_dir.exists():
            return

        # Per-ticker files match <TICKER>_*.json; no ticker means everything.
        pattern = f"**/{ticker.upper()}_*.json" if ticker else "**/*.json"
        for file in cache_dir.glob(pattern):
            try:
                file.unlink()
                logger.info(f"Deleted cache file: {file}")
            except Exception as e:
                logger.error(f"Error deleting cache file {file}: {str(e)}")
    except Exception as e:
        logger.error(f"Error clearing cache: {str(e)}")


# Set page config
st.set_page_config(
    page_title="ETF Portfolio Builder",
    page_icon="📈",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Initialize session state variables once per session. A data-driven loop
# replaces the original run of 15 near-identical
# `if key not in st.session_state` blocks; the default values are unchanged.
_SESSION_DEFAULTS = {
    'simulation_run': False,
    'df_data': None,
    'final_alloc': None,
    'mode': 'Capital Target',
    'target': 0,
    'initial_capital': 0,
    'enable_drip': False,
    'enable_erosion': False,
    'api_calls': 0,
    'force_refresh_data': False,
    'etf_allocations': [],
    'risk_tolerance': "Moderate",
    'erosion_level': {
        "nav": 5.0,    # Default 5% annual NAV erosion
        "yield": 5.0   # Default 5% annual yield erosion
    },
    'erosion_type': "NAV & Yield Erosion",
    'per_ticker_erosion': {},
    'use_per_ticker_erosion': False,
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
        logger.info("Initialized %s in session state", _key)
logger.info("Initialized use_per_ticker_erosion in session state") # Main title st.title("📈 ETF Portfolio Builder") # Function to remove ticker def remove_ticker(ticker_to_remove: str) -> None: """Remove a ticker from the portfolio.""" try: logger.info(f"Removing ticker: {ticker_to_remove}") current_allocations = list(st.session_state.etf_allocations) st.session_state.etf_allocations = [etf for etf in current_allocations if etf["ticker"] != ticker_to_remove] logger.info(f"Updated allocations after removal: {st.session_state.etf_allocations}") st.rerun() except Exception as e: logger.error(f"Error removing ticker: {str(e)}") st.error(f"Error removing ticker: {str(e)}") # Display current tickers in the main space if st.session_state.etf_allocations: st.subheader("Selected ETFs") # Create four columns for the tables with smaller widths col1, col2, col3, col4 = st.columns([1, 1, 1, 1]) # Split the ETFs into four groups etf_groups = [[] for _ in range(4)] for i, etf in enumerate(st.session_state.etf_allocations): group_index = i % 4 etf_groups[group_index].append(etf) # Display each group in its own column for i, (col, etf_group) in enumerate(zip([col1, col2, col3, col4], etf_groups)): with col: for etf in etf_group: st.markdown(f"""
{etf['ticker']}
""", unsafe_allow_html=True) if st.button("×", key=f"remove_{etf['ticker']}", help=f"Remove {etf['ticker']} from portfolio"): remove_ticker(etf['ticker']) # Debug information logger.info("=== Session State Debug ===") logger.info(f"Full session state: {dict(st.session_state)}") logger.info(f"ETF allocations type: {type(st.session_state.etf_allocations)}") logger.info(f"ETF allocations content: {st.session_state.etf_allocations}") logger.info("=== End Session State Debug ===") def add_etf_to_portfolio(ticker: str) -> bool: """Add an ETF to the portfolio with proper validation and error handling.""" try: logger.info("=== Adding ETF to Portfolio ===") logger.info(f"Input ticker: {ticker}") logger.info(f"Current allocations before adding: {st.session_state.etf_allocations}") logger.info(f"Current allocations type: {type(st.session_state.etf_allocations)}") # Validate ticker format if not re.match(r'^[A-Z]{1,7}$', ticker.upper()): logger.warning(f"Invalid ticker format: {ticker}") st.error("Invalid ticker format. Must be 1-7 uppercase letters.") return False # Check if ticker already exists if any(etf["ticker"] == ticker.upper() for etf in st.session_state.etf_allocations): logger.warning(f"Ticker {ticker.upper()} already exists in portfolio") st.warning(f"{ticker.upper()} is already in your portfolio.") return False # Verify ticker exists by fetching data logger.info(f"Fetching data for ticker: {ticker.upper()}") etf_data = fetch_etf_data([ticker.upper()]) logger.info(f"Fetched ETF data: {etf_data}") if etf_data is None or etf_data.empty: logger.warning(f"Unknown ticker: {ticker.upper()}") st.error(f"Unknown ticker: {ticker.upper()}. 
Please enter a valid ETF ticker.") return False # Create new ETF entry new_etf = { "ticker": ticker.upper(), "allocation": 0.0 } logger.info(f"Created new ETF entry: {new_etf}") # Update session state current_allocations = list(st.session_state.etf_allocations) current_allocations.append(new_etf) st.session_state.etf_allocations = current_allocations logger.info(f"Updated session state allocations: {st.session_state.etf_allocations}") logger.info(f"Updated allocations type: {type(st.session_state.etf_allocations)}") # Recalculate allocations based on risk tolerance if len(st.session_state.etf_allocations) > 0: risk_tolerance = st.session_state.risk_tolerance tickers = [etf["ticker"] for etf in st.session_state.etf_allocations] logger.info(f"Recalculating allocations for tickers: {tickers}") df_data = fetch_etf_data(tickers) logger.info(f"Fetched data for recalculation: {df_data}") if df_data is not None and not df_data.empty: etf_metrics = df_data.to_dict('records') new_allocations = optimize_portfolio_allocation( etf_metrics, risk_tolerance, pd.DataFrame() ) logger.info(f"Calculated new allocations: {new_allocations}") st.session_state.etf_allocations = new_allocations logger.info(f"Updated session state with new allocations: {st.session_state.etf_allocations}") logger.info("=== End Adding ETF to Portfolio ===") return True except Exception as e: logger.error("=== Error Adding ETF to Portfolio ===") logger.error(f"Error: {str(e)}") logger.error(traceback.format_exc()) st.error(f"Error adding ETF: {str(e)}") return False def remove_etf_from_portfolio(index: int) -> bool: """Remove an ETF from the portfolio with proper validation and error handling.""" try: logger.info(f"Attempting to remove ETF at index: {index}") logger.info(f"Current allocations before removal: {st.session_state.etf_allocations}") if not st.session_state.etf_allocations or index >= len(st.session_state.etf_allocations): logger.warning(f"Invalid ETF index for removal: {index}") return False # 
Create new list without the removed ETF current_allocations = list(st.session_state.etf_allocations) removed_etf = current_allocations.pop(index) st.session_state.etf_allocations = current_allocations logger.info(f"Successfully removed ETF: {removed_etf}") logger.info(f"Updated allocations: {st.session_state.etf_allocations}") # Recalculate allocations if there are remaining ETFs if st.session_state.etf_allocations: risk_tolerance = st.session_state.risk_tolerance tickers = [etf["ticker"] for etf in st.session_state.etf_allocations] df_data = fetch_etf_data(tickers) if df_data is not None and not df_data.empty: etf_metrics = df_data.to_dict('records') new_allocations = optimize_portfolio_allocation( etf_metrics, risk_tolerance, pd.DataFrame() ) st.session_state.etf_allocations = new_allocations logger.info(f"Recalculated allocations: {new_allocations}") return True except Exception as e: logger.error(f"Error removing ETF: {str(e)}") logger.error(traceback.format_exc()) st.error(f"Error removing ETF: {str(e)}") return False # Sidebar for ETF input with st.sidebar: st.header("ETF Allocation") # Create a container for ETF input with st.container(): # Input field for ETF ticker with improved visibility st.markdown(""" """, unsafe_allow_html=True) new_ticker = st.text_input("ETF Ticker", help="Enter a valid ETF ticker (e.g., SCHD)") # Add button to add ETF add_etf_button = st.button("ADD ETF", use_container_width=True) if add_etf_button: logger.info("=== Add ETF Button Clicked ===") logger.info(f"Input ticker: {new_ticker}") logger.info(f"Current allocations: {st.session_state.etf_allocations}") if not new_ticker: st.error("Please enter an ETF ticker.") logger.warning("No ticker provided") elif len(st.session_state.etf_allocations) >= 10: st.error("Maximum of 10 ETFs allowed in portfolio.") logger.warning("Maximum ETF limit reached") else: if add_etf_to_portfolio(new_ticker): st.success(f"Added {new_ticker.upper()} to portfolio.") logger.info("Successfully added ETF, 
triggering rerun") st.rerun() # Display total allocation if st.session_state.etf_allocations: current_total = sum(etf["allocation"] for etf in st.session_state.etf_allocations) st.metric("Total Allocation (%)", f"{current_total:.2f}") # Add a warning if total is not 100% if abs(current_total - 100) > 0.1: st.warning("Total allocation should be 100%") else: st.info("No ETFs added yet. Please add ETFs to your portfolio.") logger.info("No ETFs in portfolio") # Mode selection simulation_mode = st.radio( "Select Simulation Mode", ["Capital Target", "Income Target"] ) if simulation_mode == "Income Target": monthly_target = st.number_input( "Monthly Income Target ($)", min_value=100.0, max_value=100000.0, value=1000.0, step=100.0 ) ANNUAL_TARGET = monthly_target * 12 else: initial_capital = st.number_input( "Initial Capital ($)", min_value=1000.0, max_value=1000000.0, value=100000.0, step=1000.0 ) # Risk tolerance risk_tolerance = st.select_slider( "Risk Tolerance", options=["Conservative", "Moderate", "Aggressive"], value=st.session_state.get("risk_tolerance", "Moderate"), key="risk_tolerance_slider" ) # Check if risk tolerance changed if risk_tolerance != st.session_state.get("risk_tolerance"): logger.info("=== Risk Tolerance Change Detection ===") logger.info(f"Current risk tolerance in session state: {st.session_state.get('risk_tolerance')}") logger.info(f"New risk tolerance from slider: {risk_tolerance}") # Update session state st.session_state.risk_tolerance = risk_tolerance # Recalculate allocations if we have ETFs if st.session_state.etf_allocations: logger.info("Recalculating allocations due to risk tolerance change") tickers = [etf["ticker"] for etf in st.session_state.etf_allocations] df_data = fetch_etf_data(tickers) if df_data is not None and not df_data.empty: etf_metrics = df_data.to_dict('records') new_allocations = optimize_portfolio_allocation( etf_metrics, risk_tolerance, pd.DataFrame() ) st.session_state.etf_allocations = new_allocations 
logger.info(f"New allocations after risk tolerance change: {new_allocations}") # If simulation has been run, update final allocation if st.session_state.simulation_run and st.session_state.df_data is not None: if st.session_state.mode == "Income Target": final_alloc = allocate_for_income( st.session_state.df_data, st.session_state.target, new_allocations ) else: final_alloc = allocate_for_capital( st.session_state.df_data, st.session_state.initial_capital, new_allocations ) if final_alloc is not None: st.session_state.final_alloc = final_alloc st.rerun() # Additional options st.subheader("Additional Options") # DRIP option enable_drip = st.radio( "Enable Dividend Reinvestment (DRIP)", ["Yes", "No"], index=1 ) # Run simulation button if st.button("Run Portfolio Simulation", type="primary", use_container_width=True): if not st.session_state.etf_allocations: st.error("Please add at least one ETF to your portfolio.") else: # Store simulation parameters in session state st.session_state.mode = simulation_mode st.session_state.enable_drip = enable_drip == "Yes" st.session_state.enable_erosion = True if simulation_mode == "Income Target": st.session_state.target = monthly_target else: st.session_state.target = initial_capital st.session_state.initial_capital = initial_capital # Run simulation logger.info("Starting portfolio simulation...") logger.info(f"ETF allocations: {st.session_state.etf_allocations}") tickers = [etf["ticker"] for etf in st.session_state.etf_allocations] df_data = fetch_etf_data(tickers) logger.info(f"Fetched ETF data:\n{df_data}") if df_data is not None and not df_data.empty: if simulation_mode == "Income Target": logger.info(f"Allocating for income target: ${monthly_target}") final_alloc = allocate_for_income(df_data, monthly_target, st.session_state.etf_allocations) else: logger.info(f"Allocating for capital target: ${initial_capital}") final_alloc = allocate_for_capital(df_data, initial_capital, st.session_state.etf_allocations) 
logger.info(f"Final allocation result:\n{final_alloc}") if final_alloc is not None and not final_alloc.empty: st.session_state.simulation_run = True st.session_state.df_data = df_data st.session_state.final_alloc = final_alloc st.success("Portfolio simulation completed!") st.rerun() else: st.error("Failed to generate portfolio allocation. Please check your inputs and try again.") else: st.error("Failed to fetch ETF data. Please check your tickers and try again.") # Add reset simulation button at the bottom of sidebar if st.button("🔄 Reset Simulation", use_container_width=True, type="secondary"): reset_simulation() # Add FMP connection status to the navigation bar st.sidebar.markdown("---") st.sidebar.subheader("FMP API Status") connection_status = test_fmp_connection() if connection_status: st.sidebar.success("✅ FMP API: Connected") else: st.sidebar.error("❌ FMP API: Connection failed") # Advanced Options section in sidebar with st.sidebar.expander("Advanced Options"): # Option to toggle FMP API usage use_fmp_api = st.checkbox("Use FMP API for high-yield ETFs", value=USE_FMP_API, help="Use Financial Modeling Prep API for more accurate yield data on high-yield ETFs") if use_fmp_api != USE_FMP_API: # Update global setting if changed globals()["USE_FMP_API"] = use_fmp_api st.success("FMP API usage setting updated") # Add cache controls st.subheader("Cache Settings") # Display cache statistics cache_stats = get_cache_stats() st.write(f"Cache contains data for {cache_stats['ticker_count']} tickers ({cache_stats['file_count']} files, {cache_stats['total_size_kb']:.1f} KB)") # Force refresh option st.session_state.force_refresh_data = st.checkbox( "Force refresh data (ignore cache)", value=st.session_state.get("force_refresh_data", False), help="When enabled, always fetch fresh data from APIs" ) # Cache clearing options col1, col2 = st.columns(2) with col1: if st.button("Clear All Cache", key="clear_all_cache"): clear_cache() st.success("All cache files cleared!") 
st.session_state.api_calls = 0 with col2: ticker_to_clear = st.text_input("Clear cache for ticker:", key="cache_ticker") if st.button("Clear", key="clear_single_cache") and ticker_to_clear: clear_cache(ticker_to_clear) st.success(f"Cache cleared for {ticker_to_clear.upper()}") # Show API call counter st.write(f"API calls this session: {st.session_state.api_calls}") # Add option for debug mode and parallel processing debug_mode = st.checkbox("Enable Debug Mode", help="Show detailed error logs.") parallel_processing = st.checkbox("Enable Parallel Processing", value=True, help="Fetch data for multiple ETFs simultaneously") # Function removed - DRIP vs No-DRIP comparison is now handled directly in tab2 # Display results and interactive allocation adjustment UI after simulation is run if st.session_state.simulation_run and st.session_state.df_data is not None: df = st.session_state.df_data final_alloc = st.session_state.final_alloc if hasattr(st.session_state, 'final_alloc') else None # Validate final_alloc DataFrame if final_alloc is None or final_alloc.empty: st.error("No portfolio data available. 
Please run the simulation again.") st.session_state.simulation_run = False else: # Create tabs for better organization tab1, tab2, tab3, tab4, tab5 = st.tabs(["📈 Portfolio Overview", "📊 DRIP Forecast", "📉 Erosion Risk Assessment", "🤖 AI Suggestions", "📊 ETF Details"]) with tab1: st.subheader("💰 Portfolio Summary") portfolio_summary(final_alloc) # Display mode-specific information if st.session_state.mode == "Income Target": try: monthly_target = st.session_state.target ANNUAL_TARGET = monthly_target * 12 total_capital = final_alloc["Capital Allocated ($)"].sum() st.info(f"đŸŽ¯ **Income Target Mode**: You need ${total_capital:,.2f} to generate ${monthly_target:,.2f} in monthly income (${ANNUAL_TARGET:,.2f} annually).") except Exception as e: st.error(f"Error displaying income target information: {str(e)}") else: try: initial_capital = st.session_state.initial_capital annual_income = final_alloc["Income Contributed ($)"].sum() monthly_income = annual_income / 12 st.info(f"💲 **Capital Investment Mode**: Your ${initial_capital:,.2f} investment generates ${monthly_income:,.2f} in monthly income (${annual_income:,.2f} annually).") except Exception as e: st.error(f"Error displaying capital investment information: {str(e)}") with tab2: st.subheader("📊 DRIP vs No-DRIP Comparison") # Calculate both DRIP and No-DRIP scenarios logger.info("Calculating DRIP vs No-DRIP comparison...") try: # Initialize DRIP service from ETF_Portal.services.drip_service import DRIPService, DripConfig drip_service = DRIPService() # Create DRIP config (let service auto-calculate erosion rates) config = DripConfig( months=12, erosion_type=st.session_state.get("erosion_type", "Conservative"), erosion_level={} # Let the service calculate this automatically ) # Calculate DRIP vs No-DRIP comparison using the integrated method comparison_result = drip_service.calculate_drip_vs_no_drip_comparison( portfolio_df=final_alloc, config=config ) # Display comparison summary metrics st.subheader("📈 Strategy 
Performance Summary") col1, col2, col3, col4 = st.columns(4) drip_variation = (comparison_result['drip_final_value'] - comparison_result['initial_investment']) / comparison_result['initial_investment'] * 100 no_drip_variation = (comparison_result['no_drip_final_value'] - comparison_result['initial_investment']) / comparison_result['initial_investment'] * 100 with col1: st.metric( "DRIP Final Value", f"${comparison_result['drip_final_value']:,.2f}", delta=f"{drip_variation:+.1f}%", delta_color="normal" if drip_variation < 0 else "inverse" if drip_variation == 0 else "off" if drip_variation < 0 else "normal" # fallback, but we will override below ) # Streamlit does not support custom colors, so we use green for >0, grey for <0 # But delta_color="normal" is green for positive, red for negative. We'll use normal for green, off for grey. with col2: st.metric( "No-DRIP Final Value", f"${comparison_result['no_drip_final_value']:,.2f}", delta=f"{no_drip_variation:+.1f}%", delta_color="normal" if no_drip_variation > 0 else "off" ) with col3: winner = comparison_result['winner'] advantage = comparison_result['advantage_percentage'] st.metric( "Winner", winner, f"{advantage:.1f}% advantage" if winner != "Tie" else "Equal performance" ) with col4: st.metric( "Advantage Amount", f"${comparison_result['advantage_amount']:,.2f}" if winner != "Tie" else "$0.00" ) # Display Enhanced Monthly Details Tables st.subheader("📅 Monthly Details") # Get initial values for erosion tracking drip_result = comparison_result['drip_result'] no_drip_result = comparison_result['no_drip_result'] # Extract initial portfolio data for reference initial_portfolio_data = {} for _, row in final_alloc.iterrows(): ticker = row['Ticker'] initial_portfolio_data[ticker] = { 'initial_price': float(row['Price']), 'initial_yield': float(row['Yield (%)']) / 100, 'initial_shares': float(row['Shares']) } # Create tabs for different detail views detail_tab1, detail_tab2, detail_tab3 = st.tabs([ "📊 Summary Tables", "🔍 
Per-Ticker Details", "📈 Erosion Tracking" ]) with detail_tab1: # Summary tables (existing functionality but enhanced) col1, col2 = st.columns(2) with col1: st.subheader("🔄 DRIP Monthly Summary") drip_monthly_data = [] for month_data in drip_result.monthly_data: # Calculate month-over-month changes if month_data.month > 1: prev_data = drip_result.monthly_data[month_data.month - 2] portfolio_change = month_data.total_value - prev_data.total_value portfolio_change_pct = (portfolio_change / prev_data.total_value) * 100 else: portfolio_change = 0 portfolio_change_pct = 0 # Calculate total shares total_shares = sum(month_data.shares.values()) drip_monthly_data.append({ 'Month': month_data.month, 'Portfolio Value': f"${month_data.total_value:,.2f}", 'Monthly Change': f"{portfolio_change_pct:+.1f}%" if month_data.month > 1 else "N/A", 'Monthly Income': f"${month_data.monthly_income:,.2f}", 'Cumulative Income': f"${month_data.cumulative_income:,.2f}", 'Total Shares': f"{total_shares:.4f}", 'Avg Price': f"${month_data.total_value / total_shares:.2f}" if total_shares > 0 else "N/A" }) drip_monthly_df = pd.DataFrame(drip_monthly_data) st.dataframe(drip_monthly_df, use_container_width=True, hide_index=True, height=400) with col2: st.subheader("💰 No-DRIP Monthly Summary") no_drip_monthly_data = [] for month_data in no_drip_result.monthly_data: # Calculate month-over-month changes if month_data.month > 1: prev_data = no_drip_result.monthly_data[month_data.month - 2] portfolio_change = month_data.portfolio_value - prev_data.portfolio_value portfolio_change_pct = (portfolio_change / prev_data.portfolio_value) * 100 else: portfolio_change = 0 portfolio_change_pct = 0 total_value = month_data.portfolio_value + month_data.cumulative_income no_drip_monthly_data.append({ 'Month': month_data.month, 'Portfolio Value': f"${month_data.portfolio_value:,.2f}", 'Monthly Change': f"{portfolio_change_pct:+.1f}%" if month_data.month > 1 else "N/A", 'Monthly Income': 
f"${month_data.monthly_income:,.2f}", 'Cumulative Cash': f"${month_data.cumulative_income:,.2f}", 'Total Value': f"${total_value:,.2f}", 'Cash Ratio': f"{(month_data.cumulative_income / total_value) * 100:.1f}%" if total_value > 0 else "0.0%" }) no_drip_monthly_df = pd.DataFrame(no_drip_monthly_data) st.dataframe(no_drip_monthly_df, use_container_width=True, hide_index=True, height=400) with detail_tab2: # Per-ticker detailed breakdown st.subheader("🔍 Per-Ticker Monthly Breakdown") # Get all tickers all_tickers = list(final_alloc['Ticker'].unique()) # Create detailed per-ticker tables for ticker in all_tickers: st.markdown(f"### **{ticker}** Performance") col1, col2 = st.columns(2) with col1: st.markdown("**🔄 DRIP Strategy**") ticker_drip_data = [] for month_data in drip_result.monthly_data: shares = month_data.shares.get(ticker, 0) price = month_data.prices.get(ticker, 0) yield_rate = month_data.yields.get(ticker, 0) value = shares * price # Calculate income for this ticker (proportional) ticker_income = month_data.monthly_income * (value / month_data.total_value) if month_data.total_value > 0 else 0 # Calculate share growth initial_shares = initial_portfolio_data[ticker]['initial_shares'] share_growth = ((shares - initial_shares) / initial_shares) * 100 if initial_shares > 0 else 0 ticker_drip_data.append({ 'Month': month_data.month, 'Shares': f"{shares:.4f}", 'Price': f"${price:.2f}", 'Value': f"${value:,.2f}", 'Yield': f"{yield_rate:.2%}", 'Monthly Income': f"${ticker_income:,.2f}", 'Share Growth': f"{share_growth:+.1f}%" }) ticker_drip_df = pd.DataFrame(ticker_drip_data) st.dataframe(ticker_drip_df, use_container_width=True, hide_index=True, height=300) with col2: st.markdown("**💰 No-DRIP Strategy**") ticker_no_drip_data = [] for month_data in no_drip_result.monthly_data: shares = month_data.original_shares.get(ticker, 0) price = month_data.prices.get(ticker, 0) yield_rate = month_data.yields.get(ticker, 0) value = shares * price # Calculate income for this 
ticker (proportional) ticker_income = month_data.monthly_income * (value / month_data.portfolio_value) if month_data.portfolio_value > 0 else 0 ticker_no_drip_data.append({ 'Month': month_data.month, 'Shares': f"{shares:.4f}", 'Price': f"${price:.2f}", 'Value': f"${value:,.2f}", 'Yield': f"{yield_rate:.2%}", 'Monthly Income': f"${ticker_income:,.2f}", 'Share Growth': "0.0%" # No growth in No-DRIP }) ticker_no_drip_df = pd.DataFrame(ticker_no_drip_data) st.dataframe(ticker_no_drip_df, use_container_width=True, hide_index=True, height=300) st.markdown("---") # Separator between tickers with detail_tab3: # Erosion tracking over time st.subheader("📈 Price & Yield Erosion Tracking") # Create erosion tracking tables for ticker in all_tickers: st.markdown(f"### **{ticker}** Erosion Analysis") initial_price = initial_portfolio_data[ticker]['initial_price'] initial_yield = initial_portfolio_data[ticker]['initial_yield'] col1, col2 = st.columns(2) with col1: st.markdown("**🔄 DRIP Erosion**") drip_erosion_data = [] for month_data in drip_result.monthly_data: current_price = month_data.prices.get(ticker, initial_price) current_yield = month_data.yields.get(ticker, initial_yield) price_erosion = ((initial_price - current_price) / initial_price) * 100 yield_erosion = ((initial_yield - current_yield) / initial_yield) * 100 if initial_yield > 0 else 0 drip_erosion_data.append({ 'Month': month_data.month, 'Current Price': f"${current_price:.2f}", 'Price Erosion': f"{price_erosion:.1f}%", 'Current Yield': f"{current_yield:.2%}", 'Yield Erosion': f"{yield_erosion:.1f}%", 'Combined Impact': f"{(price_erosion + yield_erosion) / 2:.1f}%" }) drip_erosion_df = pd.DataFrame(drip_erosion_data) st.dataframe(drip_erosion_df, use_container_width=True, hide_index=True, height=300) with col2: st.markdown("**💰 No-DRIP Erosion**") no_drip_erosion_data = [] for month_data in no_drip_result.monthly_data: current_price = month_data.prices.get(ticker, initial_price) current_yield = 
month_data.yields.get(ticker, initial_yield) price_erosion = ((initial_price - current_price) / initial_price) * 100 yield_erosion = ((initial_yield - current_yield) / initial_yield) * 100 if initial_yield > 0 else 0 no_drip_erosion_data.append({ 'Month': month_data.month, 'Current Price': f"${current_price:.2f}", 'Price Erosion': f"{price_erosion:.1f}%", 'Current Yield': f"{current_yield:.2%}", 'Yield Erosion': f"{yield_erosion:.1f}%", 'Combined Impact': f"{(price_erosion + yield_erosion) / 2:.1f}%" }) no_drip_erosion_df = pd.DataFrame(no_drip_erosion_data) st.dataframe(no_drip_erosion_df, use_container_width=True, hide_index=True, height=300) st.markdown("---") # Separator between tickers # Enhanced download section st.subheader("đŸ“Ĩ Download Detailed Data") # Create comprehensive datasets for download download_col1, download_col2, download_col3 = st.columns(3) with download_col1: # DRIP comprehensive data comprehensive_drip_data = [] for month_data in drip_result.monthly_data: base_row = { 'Month': month_data.month, 'Total_Portfolio_Value': month_data.total_value, 'Monthly_Income': month_data.monthly_income, 'Cumulative_Income': month_data.cumulative_income } # Add per-ticker data for ticker in all_tickers: shares = month_data.shares.get(ticker, 0) price = month_data.prices.get(ticker, 0) yield_rate = month_data.yields.get(ticker, 0) base_row.update({ f'{ticker}_Shares': shares, f'{ticker}_Price': price, f'{ticker}_Value': shares * price, f'{ticker}_Yield': yield_rate }) comprehensive_drip_data.append(base_row) comprehensive_drip_df = pd.DataFrame(comprehensive_drip_data) drip_csv = comprehensive_drip_df.to_csv(index=False) st.download_button( label="đŸ“Ĩ Download DRIP Details", data=drip_csv, file_name="drip_comprehensive_details.csv", mime="text/csv", use_container_width=True ) with download_col2: # No-DRIP comprehensive data comprehensive_no_drip_data = [] for month_data in no_drip_result.monthly_data: base_row = { 'Month': month_data.month, 
'Portfolio_Value': month_data.portfolio_value, 'Monthly_Income': month_data.monthly_income, 'Cumulative_Cash': month_data.cumulative_income, 'Total_Value': month_data.portfolio_value + month_data.cumulative_income } # Add per-ticker data for ticker in all_tickers: shares = month_data.original_shares.get(ticker, 0) price = month_data.prices.get(ticker, 0) yield_rate = month_data.yields.get(ticker, 0) base_row.update({ f'{ticker}_Shares': shares, f'{ticker}_Price': price, f'{ticker}_Value': shares * price, f'{ticker}_Yield': yield_rate }) comprehensive_no_drip_data.append(base_row) comprehensive_no_drip_df = pd.DataFrame(comprehensive_no_drip_data) no_drip_csv = comprehensive_no_drip_df.to_csv(index=False) st.download_button( label="đŸ“Ĩ Download No-DRIP Details", data=no_drip_csv, file_name="no_drip_comprehensive_details.csv", mime="text/csv", use_container_width=True ) with download_col3: # Comparison data comparison_data = [] for i in range(len(drip_result.monthly_data)): drip_data = drip_result.monthly_data[i] no_drip_data = no_drip_result.monthly_data[i] comparison_data.append({ 'Month': drip_data.month, 'DRIP_Portfolio_Value': drip_data.total_value, 'DRIP_Monthly_Income': drip_data.monthly_income, 'DRIP_Cumulative_Income': drip_data.cumulative_income, 'No_DRIP_Portfolio_Value': no_drip_data.portfolio_value, 'No_DRIP_Monthly_Income': no_drip_data.monthly_income, 'No_DRIP_Cumulative_Cash': no_drip_data.cumulative_income, 'No_DRIP_Total_Value': no_drip_data.portfolio_value + no_drip_data.cumulative_income, 'DRIP_Advantage': drip_data.total_value - (no_drip_data.portfolio_value + no_drip_data.cumulative_income) }) comparison_df = pd.DataFrame(comparison_data) comparison_csv = comparison_df.to_csv(index=False) st.download_button( label="đŸ“Ĩ Download Comparison", data=comparison_csv, file_name="drip_vs_no_drip_comparison.csv", mime="text/csv", use_container_width=True ) # Display break-even analysis with improved time visualization st.subheader("⏰ Break-Even 
Analysis") st.write("**Time to recover your initial investment and start making profit:**") # Create break-even comparison drip_be = comparison_result['drip_break_even'] no_drip_be = comparison_result['no_drip_break_even'] initial_investment = comparison_result['initial_investment'] # Create break-even metrics display col1, col2, col3 = st.columns(3) with col1: st.metric( "Initial Investment", f"${initial_investment:,.2f}", help="Amount you need to invest upfront" ) with col2: # DRIP break-even if drip_be['break_even_month']: years = drip_be['break_even_month'] // 12 months = drip_be['break_even_month'] % 12 days = drip_be['break_even_month'] * 30 # Approximate days if years > 0: time_str = f"{years}y {months}m" else: time_str = f"{months} months" st.metric( "🔄 DRIP Break-Even Time", time_str, f"≈ {days} days", help=f"DRIP strategy becomes profitable after {drip_be['break_even_month']} months" ) else: st.metric( "🔄 DRIP Break-Even Time", "Never", "Within 12 months", delta_color="inverse", help="DRIP strategy doesn't break even within the 12-month simulation period" ) with col3: # No-DRIP break-even if no_drip_be['break_even_month']: years = no_drip_be['break_even_month'] // 12 months = no_drip_be['break_even_month'] % 12 days = no_drip_be['break_even_month'] * 30 # Approximate days if years > 0: time_str = f"{years}y {months}m" else: time_str = f"{months} months" st.metric( "💰 No-DRIP Break-Even Time", time_str, f"≈ {days} days", help=f"No-DRIP strategy becomes profitable after {no_drip_be['break_even_month']} months" ) else: st.metric( "💰 No-DRIP Break-Even Time", "Never", "Within 12 months", delta_color="inverse", help="No-DRIP strategy doesn't break even within the 12-month simulation period" ) # Visual break-even timeline st.subheader("📊 Break-Even Timeline Visualization") # Create timeline chart months = list(range(1, 13)) # Convert string values to float for calculation drip_values_float = [float(md['Portfolio Value'].replace('$', '').replace(',', '')) for md 
in drip_monthly_data] no_drip_values = [float(md['Total Value'].replace('$', '').replace(',', '')) for md in no_drip_monthly_data] fig = go.Figure() # Add initial investment line fig.add_hline( y=initial_investment, line_dash="dash", line_color="red", annotation_text="Initial Investment (Break-Even Line)", annotation_position="top right" ) # Add DRIP line fig.add_trace(go.Scatter( x=months, y=drip_values_float, mode='lines+markers', name='DRIP Portfolio Value', line=dict(color='#1f77b4', width=3), marker=dict(size=8) )) # Add No-DRIP line fig.add_trace(go.Scatter( x=months, y=no_drip_values, mode='lines+markers', name='No-DRIP Total Value', line=dict(color='#ff7f0e', width=3), marker=dict(size=8) )) # Mark break-even points if drip_be['break_even_month'] and drip_be['break_even_month'] <= 12: fig.add_vline( x=drip_be['break_even_month'], line_dash="dot", line_color="#1f77b4", annotation_text=f"DRIP Break-Even\n(Month {drip_be['break_even_month']})", annotation_position="top" ) if no_drip_be['break_even_month'] and no_drip_be['break_even_month'] <= 12: fig.add_vline( x=no_drip_be['break_even_month'], line_dash="dot", line_color="#ff7f0e", annotation_text=f"No-DRIP Break-Even\n(Month {no_drip_be['break_even_month']})", annotation_position="bottom" ) fig.update_layout( title="Portfolio Value vs Initial Investment Over Time", xaxis_title="Month", yaxis_title="Portfolio Value ($)", template="plotly_white", height=500, hovermode='x unified' ) st.plotly_chart(fig, use_container_width=True) # Break-even explanation st.info(""" **Break-Even Analysis Explanation:** - **Break-even point**: When your total portfolio value exceeds your initial investment - **DRIP**: Portfolio value from share growth due to dividend reinvestment - **No-DRIP**: Portfolio value + accumulated cash from dividends - **Timeline**: Shows how quickly each strategy recovers your initial investment """) # Display recommendation st.subheader("💡 Investment Recommendation") if winner == "DRIP": 
st.success(f"đŸŽ¯ **Recommended Strategy: DRIP** - {advantage:.1f}% better performance due to compound growth from reinvested dividends.") elif winner == "No-DRIP": st.success(f"đŸŽ¯ **Recommended Strategy: No-DRIP** - {advantage:.1f}% better performance with immediate liquidity from cash dividends.") else: st.info("đŸŽ¯ **Both strategies perform equally.** Choose based on your liquidity preferences.") # Create performance comparison visualization st.subheader("📊 Performance Comparison Chart") # Create comparison data for chart fig = go.Figure() strategies = ["DRIP", "No-DRIP"] final_values = [comparison_result['drip_final_value'], comparison_result['no_drip_final_value']] colors = ['#1f77b4', '#ff7f0e'] # Add bars with different colors for winner for i, (strategy, value) in enumerate(zip(strategies, final_values)): color = '#2ca02c' if strategy == winner else colors[i] # Green for winner fig.add_trace(go.Bar( x=[strategy], y=[value], text=[f"${value:,.0f}"], textposition='auto', marker_color=color, name=strategy )) fig.update_layout( title="Final Portfolio Value Comparison", yaxis_title="Portfolio Value ($)", template="plotly_white", showlegend=False, height=400 ) st.plotly_chart(fig, use_container_width=True) # Display detailed analysis summary st.subheader("📋 Detailed Analysis Summary") st.markdown(comparison_result['comparison_summary']) # Add enhanced strategy explanation and table guide st.subheader("â„šī¸ Understanding the Monthly Details Tables") # Create expandable sections for explanations with st.expander("📊 Table Column Explanations", expanded=False): st.markdown(""" ### Summary Tables: - **Portfolio Value**: Current market value of all shares at eroded prices - **Monthly Change**: Month-over-month percentage change in portfolio value - **Monthly Income**: Dividends received/reinvested in that month - **Cumulative Income/Cash**: Total dividends received since start - **Total Shares** (DRIP): Total shares owned including reinvested dividends - **Avg 
Price** (DRIP): Average price per share across all holdings - **Cash Ratio** (No-DRIP): Percentage of total value held as cash ### Per-Ticker Details: - **Shares**: Number of shares owned (grows with DRIP, constant with No-DRIP) - **Price**: Current share price after erosion effects - **Value**: Market value of holdings (shares × price) - **Yield**: Current dividend yield after erosion - **Share Growth**: Percentage increase in shares from dividend reinvestment ### Erosion Tracking: - **Price Erosion**: Cumulative NAV erosion from initial price - **Yield Erosion**: Cumulative yield erosion from initial yield - **Combined Impact**: Average of price and yield erosion effects """) with st.expander("🔄 Strategy Explanations", expanded=False): col1, col2 = st.columns(2) with col1: st.markdown(""" **🔄 DRIP (Dividend Reinvestment Plan):** - Dividends automatically buy more shares - Share count increases each month - Benefits from compound growth - Portfolio value = growing shares × eroded prices - Best for long-term wealth building """) with col2: st.markdown(""" **💰 No-DRIP (Cash Distribution):** - Dividends kept as cash - Share count stays constant - Immediate income availability - Total value = portfolio + accumulated cash - Best for current income needs """) with st.expander("âš ī¸ Important Considerations", expanded=False): st.markdown(""" **Erosion Effects:** - Both strategies experience identical NAV (price) and yield erosion - High-yield ETFs typically have higher erosion risk - Erosion rates are calculated from historical ETF performance data **Performance Factors:** - DRIP benefits from compound growth but suffers from erosion on larger holdings - No-DRIP provides liquidity but misses compound growth opportunities - Break-even analysis shows when each strategy recovers initial investment **Data Accuracy:** - Monthly calculations include distribution frequency variations - Price and yield erosion applied monthly based on historical analysis - All values shown are 
projections based on current yields and calculated erosion rates """) # Note about erosion effects st.info(""" **📈 Reading the Data:** The Monthly Details tables show how your investment evolves over time under realistic market conditions including NAV and yield erosion. Use the Per-Ticker Details to see individual ETF performance, and the Erosion Tracking to understand how much your holdings are affected by market pressures. """) logger.info("DRIP vs No-DRIP comparison completed successfully") except Exception as e: st.error(f"Error calculating DRIP vs No-DRIP comparison: {str(e)}") logger.error(f"DRIP comparison error: {str(e)}") logger.error(traceback.format_exc()) st.stop() with tab3: st.subheader("📉 Erosion Risk Assessment") st.write(""" This analysis uses historical ETF data to estimate reasonable erosion settings based on past performance, volatility, and dividend history. """) try: from ETF_Portal.services.nav_erosion_service import NavErosionService with st.spinner("Analyzing historical ETF data..."): erosion_service = NavErosionService() risk_analysis = erosion_service.analyze_etf_erosion_risk(final_alloc["Ticker"].tolist()) except ImportError as e: st.error(f"Error importing NavErosionService: {str(e)}") st.error("Please ensure the nav_erosion_service module is properly installed.") logger.error(f"Import error: {str(e)}") logger.error(traceback.format_exc()) risk_analysis = None if risk_analysis and risk_analysis.results: risk_data = [] for result in risk_analysis.results: risk_data.append({ "Ticker": result.ticker, "NAV Erosion Risk (0-9)": result.nav_erosion_risk, "Yield Erosion Risk (0-9)": result.yield_erosion_risk, "Estimated Annual NAV Erosion": f"{result.estimated_nav_erosion:.1%}", "Estimated Annual Yield Erosion": f"{result.estimated_yield_erosion:.1%}", "NAV Risk Explanation": result.nav_risk_explanation, "Yield Risk Explanation": result.yield_risk_explanation, "ETF Age (Years)": f"{result.etf_age_years:.1f}" if result.etf_age_years else 
"Unknown", "Max Drawdown": f"{result.max_drawdown:.1%}" if result.max_drawdown else "Unknown", "Volatility": f"{result.volatility:.1%}" if result.volatility else "Unknown", "Sharpe Ratio": f"{result.sharpe_ratio:.2f}" if result.sharpe_ratio else "Unknown", "Sortino Ratio": f"{result.sortino_ratio:.2f}" if result.sortino_ratio else "Unknown", "Dividend Trend": f"{result.dividend_trend:.1%}" if result.dividend_trend else "Unknown" }) st.subheader("Recommended Erosion Settings") main_columns = [ "Ticker", "NAV Erosion Risk (0-9)", "Yield Erosion Risk (0-9)", "Estimated Annual NAV Erosion", "Estimated Annual Yield Erosion", "NAV Risk Explanation", "Yield Risk Explanation" ] st.dataframe( pd.DataFrame(risk_data)[main_columns], use_container_width=True, hide_index=True ) st.subheader("Detailed Risk Metrics") detail_columns = [ "Ticker", "ETF Age (Years)", "Max Drawdown", "Volatility", "Sharpe Ratio", "Sortino Ratio", "Dividend Trend" ] st.dataframe( pd.DataFrame(risk_data)[detail_columns], use_container_width=True, hide_index=True ) if st.button("Apply Recommended Erosion Settings", type="primary"): if "per_ticker_erosion" not in st.session_state or not isinstance(st.session_state.per_ticker_erosion, dict): st.session_state.per_ticker_erosion = {} for result in risk_analysis.results: st.session_state.per_ticker_erosion[result.ticker] = { "nav": result.nav_erosion_risk, "yield": result.yield_erosion_risk } st.session_state.erosion_type = "NAV & Yield Erosion" st.session_state.use_per_ticker_erosion = True erosion_level = { "global": { "nav": 5, "yield": 5 }, "per_ticker": st.session_state.per_ticker_erosion, "use_per_ticker": True } st.session_state.erosion_level = erosion_level st.success("Applied recommended erosion settings. They will be used in the DRIP forecast.") st.info("Go to the DRIP Forecast tab to see the impact of these settings.") else: st.error("Unable to analyze ETF erosion risk. 
Please try again.") with tab4: st.subheader("🤖 AI Portfolio Suggestions") try: # Get values from session state capital_target = st.session_state.initial_capital if st.session_state.mode == "Capital Target" else 3000.0 income_target = st.session_state.target * 12 if st.session_state.mode == "Income Target" else 0.0 risk_tolerance = st.session_state.risk_tolerance investment_horizon = 5 # Default to 5 years if not specified # Initialize services from ETF_Portal.services.data_service import DataService from ETF_Portal.services.etf_selection_service import ETFSelectionService from ETF_Portal.services.etf_selection_service import InvestmentGoal, RiskTolerance data_service = DataService() selection_service = ETFSelectionService(data_service) # Create investment goal goal = InvestmentGoal( capital_target=capital_target, income_target=income_target if income_target > 0 else None, risk_tolerance=RiskTolerance[risk_tolerance.upper()], investment_horizon=investment_horizon ) # Get AI suggestions for different strategies with st.spinner("Analyzing ETFs and generating portfolio suggestions..."): try: # Strategy 1: Balanced Growth balanced_goal = InvestmentGoal( capital_target=capital_target, income_target=income_target if income_target > 0 else None, risk_tolerance=RiskTolerance.MODERATE, investment_horizon=investment_horizon ) balanced_portfolio = selection_service.select_etfs(balanced_goal) # Strategy 2: Income Focus income_goal = InvestmentGoal( capital_target=capital_target, income_target=income_target * 1.2 if income_target > 0 else capital_target * 0.06, # 6% target yield risk_tolerance=RiskTolerance.CONSERVATIVE, investment_horizon=investment_horizon ) income_portfolio = selection_service.select_etfs(income_goal) # Strategy 3: Growth Focus growth_goal = InvestmentGoal( capital_target=capital_target, income_target=income_target * 0.8 if income_target > 0 else capital_target * 0.03, # 3% target yield risk_tolerance=RiskTolerance.AGGRESSIVE, 
investment_horizon=investment_horizon ) growth_portfolio = selection_service.select_etfs(growth_goal) # Strategy 4: Risk-Adjusted (uses user's risk tolerance) risk_adjusted_goal = InvestmentGoal( capital_target=capital_target, income_target=income_target if income_target > 0 else None, # No default yield target risk_tolerance=RiskTolerance[risk_tolerance.upper()], investment_horizon=investment_horizon ) risk_adjusted_portfolio = selection_service.select_etfs(risk_adjusted_goal) # Create tabs for each strategy strategy_tabs = st.tabs([ "🔄 Balanced Growth", "💰 Income Focus", "📈 Growth Focus", "âš–ī¸ Risk-Adjusted" ]) # Display Balanced Growth Strategy with strategy_tabs[0]: st.write("### Balanced Growth Strategy") st.write(""" A balanced approach focusing on both growth and income, suitable for most investors. - Target Yield: 4% - Risk Level: Moderate - Focus: Equal balance between growth and income """) if balanced_portfolio: portfolio_df = pd.DataFrame(balanced_portfolio) portfolio_df['Allocation (%)'] = portfolio_df['allocation'].round().astype(int) portfolio_df['Amount ($)'] = portfolio_df['amount'] st.dataframe( portfolio_df[['ticker', 'name', 'Allocation (%)', 'Amount ($)']], hide_index=True ) # Display metrics metrics_df = pd.DataFrame([ { 'Ticker': etf['ticker'], 'Expense Ratio (%)': etf['metrics']['expense_ratio'] * 100, 'AUM ($B)': etf['metrics']['aum'] / 1e9, 'Volatility (%)': etf['metrics']['volatility'] * 100, 'Max Drawdown (%)': etf['metrics']['max_drawdown'] * 100, 'Sharpe Ratio': etf['metrics']['sharpe_ratio'], 'Dividend Yield (%)': etf['metrics']['dividend_yield'] } for etf in balanced_portfolio ]) st.dataframe(metrics_df, hide_index=True) else: st.error("Could not generate balanced growth portfolio.") # Display Income Focus Strategy with strategy_tabs[1]: st.write("### Income Focus Strategy") st.write(""" Optimized for higher dividend income with lower risk, suitable for income-focused investors. 
- Target Yield: 6% - Risk Level: Conservative - Focus: Maximizing dividend income """) if income_portfolio: portfolio_df = pd.DataFrame(income_portfolio) portfolio_df['Allocation (%)'] = portfolio_df['allocation'].round().astype(int) portfolio_df['Amount ($)'] = portfolio_df['amount'] st.dataframe( portfolio_df[['ticker', 'name', 'Allocation (%)', 'Amount ($)']], hide_index=True ) # Display metrics metrics_df = pd.DataFrame([ { 'Ticker': etf['ticker'], 'Expense Ratio (%)': etf['metrics']['expense_ratio'] * 100, 'AUM ($B)': etf['metrics']['aum'] / 1e9, 'Volatility (%)': etf['metrics']['volatility'] * 100, 'Max Drawdown (%)': etf['metrics']['max_drawdown'] * 100, 'Sharpe Ratio': etf['metrics']['sharpe_ratio'], 'Dividend Yield (%)': etf['metrics']['dividend_yield'] } for etf in income_portfolio ]) st.dataframe(metrics_df, hide_index=True) else: st.error("Could not generate income focus portfolio.") # Display Growth Focus Strategy with strategy_tabs[2]: st.write("### Growth Focus Strategy") st.write(""" Optimized for capital appreciation with higher risk tolerance, suitable for growth investors. 
- Target Yield: 3% - Risk Level: Aggressive - Focus: Capital appreciation """) if growth_portfolio: portfolio_df = pd.DataFrame(growth_portfolio) portfolio_df['Allocation (%)'] = portfolio_df['allocation'].round().astype(int) portfolio_df['Amount ($)'] = portfolio_df['amount'] st.dataframe( portfolio_df[['ticker', 'name', 'Allocation (%)', 'Amount ($)']], hide_index=True ) # Display metrics metrics_df = pd.DataFrame([ { 'Ticker': etf['ticker'], 'Expense Ratio (%)': etf['metrics']['expense_ratio'] * 100, 'AUM ($B)': etf['metrics']['aum'] / 1e9, 'Volatility (%)': etf['metrics']['volatility'] * 100, 'Max Drawdown (%)': etf['metrics']['max_drawdown'] * 100, 'Sharpe Ratio': etf['metrics']['sharpe_ratio'], 'Dividend Yield (%)': etf['metrics']['dividend_yield'] } for etf in growth_portfolio ]) st.dataframe(metrics_df, hide_index=True) else: st.error("Could not generate growth focus portfolio.") # Display Risk-Adjusted Strategy with strategy_tabs[3]: st.write("### Risk-Adjusted Strategy") st.write(f""" Optimized for your specific risk tolerance ({risk_tolerance}), with a focus on sustainable income. 
- Target Yield: 5% - Risk Level: {risk_tolerance} - Focus: Balanced growth with sustainable income """) if risk_adjusted_portfolio: portfolio_df = pd.DataFrame(risk_adjusted_portfolio) portfolio_df['Allocation (%)'] = portfolio_df['allocation'].round().astype(int) portfolio_df['Amount ($)'] = portfolio_df['amount'] st.dataframe( portfolio_df[['ticker', 'name', 'Allocation (%)', 'Amount ($)']], hide_index=True ) # Display metrics metrics_df = pd.DataFrame([ { 'Ticker': etf['ticker'], 'Expense Ratio (%)': etf['metrics']['expense_ratio'] * 100, 'AUM ($B)': etf['metrics']['aum'] / 1e9, 'Volatility (%)': etf['metrics']['volatility'] * 100, 'Max Drawdown (%)': etf['metrics']['max_drawdown'] * 100, 'Sharpe Ratio': etf['metrics']['sharpe_ratio'], 'Dividend Yield (%)': etf['metrics']['dividend_yield'] } for etf in risk_adjusted_portfolio ]) st.dataframe(metrics_df, hide_index=True) else: st.error("Could not generate risk-adjusted portfolio.") # Add buttons to apply each strategy st.write("### Apply Strategy") col1, col2, col3, col4 = st.columns(4) with col1: if st.button("Apply Balanced Growth", key="apply_balanced"): if balanced_portfolio: st.session_state.etf_allocations = [ {"ticker": etf['ticker'], "allocation": int(round(etf['allocation']))} for etf in balanced_portfolio ] st.success("Applied Balanced Growth strategy!") st.rerun() with col2: if st.button("Apply Income Focus", key="apply_income"): if income_portfolio: st.session_state.etf_allocations = [ {"ticker": etf['ticker'], "allocation": int(round(etf['allocation']))} for etf in income_portfolio ] st.success("Applied Income Focus strategy!") st.rerun() with col3: if st.button("Apply Growth Focus", key="apply_growth"): if growth_portfolio: st.session_state.etf_allocations = [ {"ticker": etf['ticker'], "allocation": int(round(etf['allocation']))} for etf in growth_portfolio ] st.success("Applied Growth Focus strategy!") st.rerun() with col4: if st.button("Apply Risk-Adjusted", key="apply_risk_adjusted"): if 
risk_adjusted_portfolio: st.session_state.etf_allocations = [ {"ticker": etf['ticker'], "allocation": int(round(etf['allocation']))} for etf in risk_adjusted_portfolio ] st.success("Applied Risk-Adjusted strategy!") st.rerun() except ValueError as e: st.error(str(e)) except Exception as e: st.error(f"An unexpected error occurred: {str(e)}") logger.error(f"Error generating portfolio suggestions: {str(e)}", exc_info=True) except Exception as e: st.error(f"Error initializing services: {str(e)}") logger.error(f"Error initializing services: {str(e)}", exc_info=True) with tab5: st.subheader("📊 ETF Details") st.write("This tab will contain detailed information about the selected ETFs.")