AlexStav committed on
Commit f0b24d3
1 Parent(s): 0695f38

Update app.py

Files changed (1)
  1. app.py +10 -4
app.py CHANGED
@@ -1,6 +1,7 @@
 
  import streamlit as st
  import pandas as pd
+ import numpy as np
  from prophet import Prophet
  import yfinance as yf
  from sklearn.metrics import mean_absolute_error, mean_squared_error
@@ -61,22 +62,27 @@ def main():
 
  st.subheader('Forecast Data')
  st.write('The table below shows the forecasted stock prices along with the lower and upper bounds of the predictions.')
- st.write(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())
+ st.write(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].head())
 
  st.subheader('Forecast Plot')
  st.write('The plot below visualizes the predicted stock prices with their confidence intervals.')
  fig1 = plot_plotly(model, forecast)
- fig1.update_traces(marker=dict(color='red'), line=dict(color='black'))
+ fig1.update_traces(marker=dict(color='red'), line=dict(color='white'))
  st.plotly_chart(fig1)
 
  st.subheader('Forecast Components')
  st.write('This plot breaks down the forecast into trend, weekly, and yearly components.')
  fig2 = plot_components_plotly(model, forecast)
- fig2.update_traces(line=dict(color='black'))
+ fig2.update_traces(line=dict(color='white'))
  st.plotly_chart(fig2)
 
  st.subheader('Performance Metrics')
- st.write('The metrics below provide a quantitative measure of the model’s accuracy. The Mean Absolute Error (MAE) is the average absolute difference between predicted and actual values, Mean Squared Error (MSE) is the average squared difference, and Root Mean Squared Error (RMSE) is the square root of MSE, which is more interpretable in the same units as the target variable.')
+ st.write("""
+ The metrics below provide a quantitative measure of the model’s accuracy:
+ - **Mean Absolute Error (MAE)**: A lower value indicates better performance.
+ - **Mean Squared Error (MSE)**: A lower value indicates better performance, and it penalizes larger errors more than MAE.
+ - **Root Mean Squared Error (RMSE)**: A lower value indicates better performance, similar to MSE, but in the same units as the target variable.
+ """)
  actual = df['y']
  predicted = forecast['yhat'][:len(df)]
  metrics = calculate_performance_metrics(actual, predicted)
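
Note: the diff calls a `calculate_performance_metrics` helper whose body lies outside the changed hunks, so it is not shown in this commit. Below is a minimal sketch of what such a helper could look like, assuming it returns the MAE, MSE, and RMSE described in the new explanatory text and uses only the imports already present in `app.py` (`numpy` and `sklearn.metrics`); the function body and return format are assumptions, not part of the commit.

```python
# Hypothetical sketch of the calculate_performance_metrics helper referenced
# in the diff; its real implementation is not part of this commit. It relies
# only on the imports already present in app.py (numpy and sklearn.metrics).
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

def calculate_performance_metrics(actual, predicted):
    """Compute MAE, MSE, and RMSE for the in-sample Prophet predictions."""
    mae = mean_absolute_error(actual, predicted)   # average absolute error
    mse = mean_squared_error(actual, predicted)    # squared error, penalizes large misses
    rmse = np.sqrt(mse)                            # same units as the target variable
    return {'MAE': mae, 'MSE': mse, 'RMSE': rmse}
```

With a helper like this, the dictionary returned in `main()` could then be displayed beneath the explanatory text, for example with `st.write(metrics)`.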