tmotagam committed on
Commit 31158e5 • 1 Parent(s): 675c14a
environment.yml ADDED
@@ -0,0 +1,16 @@
+ name: VARIMA_DEMO
+ channels:
+   - conda-forge
+ dependencies:
+   - python=3.10
+   - ipykernel
+   - pandas
+   - pip
+   - matplotlib
+   - numpy
+   - vadersentiment
+   - statsmodels
+
+   - pip:
+     - voila==0.5.0
+     - voila-topbar==0.1.1
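A quick way to verify this environment, assuming it has been created from the file above (for example with conda env create -f environment.yml) and activated, is a short Python import check; the package list below simply mirrors the dependencies pinned in environment.yml:

import importlib

# Sanity check: each entry corresponds to a dependency pinned in environment.yml.
# "vaderSentiment" is the module name installed by the "vadersentiment" package.
for pkg in ["pandas", "numpy", "matplotlib", "vaderSentiment", "statsmodels", "voila"]:
    module = importlib.import_module(pkg)
    print(pkg, getattr(module, "__version__", "version not reported"))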
notebooks/ADA-USD.xlsx ADDED
Binary file (147 kB).
 
notebooks/ADA.ipynb ADDED
@@ -0,0 +1 @@
1
+ {"cells":[{"cell_type":"markdown","metadata":{"id":"irFBIpdC1W2b"},"source":["# ADA with Vector Autoregressive (VAR) model\n"]},{"cell_type":"markdown","metadata":{"id":"OB_XEVTu1W2d"},"source":["## Importing/Downloading all the libraries required"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ixZomsiV1W2e"},"outputs":[],"source":["import re\n","import pandas as pd\n","import numpy as np\n","import matplotlib.pyplot as plt\n","from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n","from statsmodels.tsa.api import VAR\n","from statsmodels.tsa.stattools import adfuller\n","from statsmodels.tsa.stattools import grangercausalitytests\n","from statsmodels.tsa.vector_ar.vecm import coint_johansen\n","from statsmodels.stats.stattools import durbin_watson"]},{"cell_type":"markdown","metadata":{"id":"GDCjLhLw1W2g"},"source":["## Data Preprocessing\n"]},{"cell_type":"markdown","metadata":{"id":"y7vLTDJG1W2g"},"source":["### Importing and summarizing the datasets"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"re-YPLB51W2g"},"outputs":[],"source":["sentimentdf = pd.read_parquet(\"hf://datasets/tmotagam/Cryptocurrencies-sentiment-from-X/ADA-sentiment-dataset.parquet\")\n","sentimentdf.drop('id', axis=1, inplace=True)\n","sentimentdf.set_index('date', inplace=True)\n","adadf = pd.read_excel('ADA-USD.xlsx', parse_dates=['timestamp'], index_col=0)\n","print('====================================================================================')\n","print('ADA Sentiment Summary:')\n","print(sentimentdf.describe())\n","print('====================================================================================')\n","print('ADA Sentiment Data:')\n","print(sentimentdf.tail())\n","print('====================================================================================')\n","print('ADA Price Summary:')\n","print(adadf.describe())\n","print('====================================================================================')\n","print('ADA Price Data:')\n","print(adadf.tail())\n","print('====================================================================================')"]},{"cell_type":"markdown","metadata":{"id":"oLdbp5Dv2FQk"},"source":["### Removing duplicate and unwanted data points, columns from the datasets"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"UhYHtKf22I2D"},"outputs":[],"source":["sentimentdf['tmpdate'] = sentimentdf.index\n","date_ids = sentimentdf['tmpdate'].unique()\n","for date in date_ids:\n"," tmpdf = sentimentdf[sentimentdf['tmpdate'] == date]\n"," tmpdf = tmpdf.drop_duplicates()\n"," sentimentdf = pd.concat([sentimentdf, tmpdf]).drop_duplicates()\n","sentimentdf = sentimentdf.drop('tmpdate', axis=1)\n","adadf.drop(['low', 'open', 'volume', 'close', 'high'], axis=1, inplace=True)\n","adadf = adadf.loc['2021-12-29':]\n","print('====================================================================================')\n","print('ADA Sentiment Summary:')\n","print(sentimentdf.describe())\n","print('====================================================================================')\n","print('ADA Sentiment Data:')\n","print(sentimentdf.head())\n","print('====================================================================================')\n","print('ADA Price Summary:')\n","print(adadf.describe())\n","print('====================================================================================')\n","print('ADA Price 
Data:')\n","print(adadf.head())\n","print('====================================================================================')"]},{"cell_type":"markdown","metadata":{"id":"fbZ57XSA2TQW"},"source":["### Getting sentiment score and there average using VADER"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"UdfXjmTB2VU0"},"outputs":[],"source":["analyzer = SentimentIntensityAnalyzer()\n","sentimentdf['neg'] = [analyzer.polarity_scores(re.sub(r\"(@[A-Za-z0–9_]+)|[^\\w\\s]|#|http\\S+\", \"\", x.replace(\"\\n\",\" \")))['neg'] for x in sentimentdf['content']]\n","sentimentdf['pos'] = [analyzer.polarity_scores(re.sub(r\"(@[A-Za-z0–9_]+)|[^\\w\\s]|#|http\\S+\", \"\", x.replace(\"\\n\",\" \")))['pos'] for x in sentimentdf['content']]\n","sentimentdf['neu'] = [analyzer.polarity_scores(re.sub(r\"(@[A-Za-z0–9_]+)|[^\\w\\s]|#|http\\S+\", \"\", x.replace(\"\\n\",\" \")))['neu'] for x in sentimentdf['content']]\n","sentimentdf.drop(['content'], axis=1, inplace=True)\n","df_grouped = sentimentdf.groupby(sentimentdf.index.date)\n","averages = df_grouped.apply(lambda x: np.sum(x, axis=0) / x.shape[0])\n","averages_reshape = np.vstack(averages.values)\n","df_averages = pd.DataFrame(averages_reshape, index=averages.index, columns=sentimentdf.columns)\n","print('====================================================================================')\n","print('ADA Sentiment Summary:')\n","print(df_averages.describe())\n","print('====================================================================================')\n","print('ADA Sentiment Data:')\n","print(df_averages.head())\n","print('====================================================================================')"]},{"cell_type":"markdown","metadata":{"id":"9idp3wxG2acV"},"source":["### Combining the two datasets"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"lkLb2qew2ccK"},"outputs":[],"source":["df = adadf.assign(neg=df_averages['neg'], pos=df_averages['pos'], neu=df_averages['neu'])\n","print('====================================================================================')\n","print('Summary:')\n","print(df.describe())\n","print('====================================================================================')\n","print('Data:')\n","print(df.head())\n","print('====================================================================================')"]},{"cell_type":"markdown","metadata":{"id":"WlhDrFeZ1W2h"},"source":["### Plotting the dataset\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"OD15VEcc1W2h"},"outputs":[],"source":["fig, axes = plt.subplots(nrows=4, ncols=1, dpi=120, figsize=(10,6))\n","for i, ax in enumerate(axes.flatten()):\n"," data = df[df.columns[i]]\n"," ax.plot(data, color='red', linewidth=1)\n"," ax.set_title(df.columns[i])\n"," ax.xaxis.set_ticks_position('none')\n"," ax.yaxis.set_ticks_position('none')\n"," ax.spines[\"top\"].set_alpha(0)\n"," ax.tick_params(labelsize=6)\n","\n","plt.tight_layout()\n","plt.show()"]},{"cell_type":"markdown","metadata":{"id":"XPbRamoy1W2h"},"source":["### Granger Causality Test\n","\n","Granger Causality Test is of all possible combinations of the Time series.\n","The rows are the response variable, columns are predictors. The values in the table\n","are the P-Values. 
P-Values lesser than the significance level (0.05), implies\n","the Null Hypothesis that the coefficients of the corresponding past values is\n","zero, that is, the X does not cause Y can be rejected.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"OonZX3qV1W2i"},"outputs":[],"source":["maxlag=12\n","test = 'ssr_chi2test'\n","def grangers_causation_matrix(data, variables, test='ssr_chi2test', verbose=False):\n"," df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables)\n"," for c in df.columns:\n"," for r in df.index:\n"," test_result = grangercausalitytests(data[[r, c]], maxlag=maxlag)\n"," p_values = [round(test_result[i+1][0][test][1],4) for i in range(maxlag)]\n"," min_p_value = np.min(p_values)\n"," df.loc[r, c] = min_p_value\n"," df.columns = [var + '_x' for var in variables]\n"," df.index = [var + '_y' for var in variables]\n"," return df\n","\n","grangers_causation_matrix(df, variables = df.columns)"]},{"cell_type":"markdown","metadata":{"id":"r-93vGD51W2i"},"source":["### Johanson's Cointegration Test\n","\n","The Johansen test, named after SΓΈren Johansen, is a procedure for testing cointegration of several, say k, I(1) time series.\n","This test permits more than one cointegrating relationship so is more generally applicable than the Engle–Granger test which is based on the Dickey–Fuller (or the augmented) test for unit roots in the residuals from a single (estimated) cointegrating relationship.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"6U6VD8HU1W2i"},"outputs":[],"source":["def cointegration_test(df, alpha=0.05):\n"," out = coint_johansen(df,-1,5)\n"," d = {'0.90':0, '0.95':1, '0.99':2}\n"," traces = out.lr1\n"," cvts = out.cvt[:, d[str(1-alpha)]]\n"," def adjust(val, length= 6): return str(val).ljust(length)\n","\n"," # Summary\n"," print('Name :: Test Stat > C(95%) => Signif \\n', '--'*20)\n"," for col, trace, cvt in zip(df.columns, traces, cvts):\n"," print(adjust(col), ':: ', adjust(round(trace,2), 9), \">\", adjust(cvt, 8), ' => ' , trace > cvt)\n","\n","cointegration_test(df)"]},{"cell_type":"markdown","metadata":{"id":"rUuebS941W2j"},"source":["### Train and Test Split\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"L46u50bg1W2j"},"outputs":[],"source":["nobs = 10 # number of observations to be forecasted\n","df_train, df_test = df[0:-nobs], df[-nobs:]\n","\n","print(df_train.shape)\n","print(df_test.shape)"]},{"cell_type":"markdown","metadata":{"id":"Mp3J_o0M1W2j"},"source":["### ADFuller to test for Stationarity of given series\n","\n","An augmented Dickey–Fuller test (ADF) tests the null hypothesis that a unit root is present in a time series sample.\n","The alternative hypothesis is different depending on which version of the test is used, but is usually stationarity or trend-stationarity.\n","It is an augmented version of the Dickey–Fuller test for a larger and more complicated set of time series models.\n","\n","The augmented Dickey–Fuller (ADF) statistic, used in the test, is a negative number.\n","The more negative it is, the stronger the rejection of the hypothesis that there is a unit root at some level of confidence.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"SseTXs4D1W2j"},"outputs":[],"source":["def adfuller_test(series,name, signif=0.05, verbose=False):\n"," r = adfuller(series, autolag='AIC')\n"," output = {'test_statistic':round(r[0], 4), 'pvalue':round(r[1], 4), 'n_lags':round(r[2], 4), 'n_obs':r[3]}\n"," p_value = 
output['pvalue']\n"," def adjust(val, length= 6): return str(val).ljust(length)\n","\n"," print(f' Augmented Dickey-Fuller Test on \"{name}\"', \"\\n \", '-'*47)\n"," print(f' Null Hypothesis: Data has unit root. Non-Stationary.')\n"," print(f' Significance Level = {signif}')\n"," print(f' Test Statistic = {output[\"test_statistic\"]}')\n"," print(f' No. Lags Chosen = {output[\"n_lags\"]}')\n","\n"," for key,val in r[4].items():\n"," print(f' Critical value {adjust(key)} = {round(val, 3)}')\n","\n"," if p_value <= signif:\n"," print(f\" => P-Value = {p_value}. Rejecting Null Hypothesis.\")\n"," print(f\" => Series is Stationary.\")\n"," else:\n"," print(f\" => P-Value = {p_value}. Weak evidence to reject the Null Hypothesis.\")\n"," print(f\" => Series is Non-Stationary.\")\n","\n","for name, column in df_train.items():\n"," adfuller_test(column, name=name)\n"," print('\\n')"]},{"cell_type":"markdown","metadata":{"id":"zCCmEII91W2j"},"source":["### Since the series is non stationary we will perform differencing and run the ADF test again\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"APM6v6OY1W2k"},"outputs":[],"source":["df_differenced = df_train.diff().dropna()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"oIX9Wl-L1W2k"},"outputs":[],"source":["for name, column in df_differenced.items():\n"," adfuller_test(column, name=name)\n"," print('\\n')"]},{"cell_type":"markdown","metadata":{"id":"XWv803Qk1W2k"},"source":["### Selecting Lag Order (p) for VAR model\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"mv5KKYht1W2k"},"outputs":[],"source":["model = VAR(df_differenced)\n","for i in [1,2,3,4,5,6,7,8,9]:\n"," result = model.fit(i)\n"," print('Lag Order =', i)\n"," print('AIC : ', result.aic)\n"," print('BIC : ', result.bic)\n"," print('FPE : ', result.fpe)\n"," print('HQIC: ', result.hqic, '\\n')\n","\n","x = model.select_order(maxlags=12)\n","x.summary()"]},{"cell_type":"markdown","metadata":{"id":"7doef7501W2k"},"source":["## Model Training\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"wCaMxcs31W2k"},"outputs":[],"source":["model_fitted = model.fit(6)\n","model_fitted.summary()"]},{"cell_type":"markdown","metadata":{"id":"g1s2S43x1W2l"},"source":["## Durbin Watson Test\n","\n","The Durbin–Watson statistic is a test statistic used to detect the presence of autocorrelation at lag 1 in the residuals (prediction errors) from a regression analysis.\n","It is named after James Durbin and Geoffrey Watson.\n","The small sample distribution of this ratio was derived by John von Neumann (von Neumann, 1941).\n","Durbin and Watson (1950, 1951) applied this statistic to the residuals from least squares regressions, and developed bounds tests for the null hypothesis that the errors are serially uncorrelated against the alternative that they follow a first order autoregressive process.\n","Note that the distribution of this test statistic does not depend on the estimated regression coefficients and the variance of the errors.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"fFXUrYDo1W2l"},"outputs":[],"source":["out = durbin_watson(model_fitted.resid)\n","\n","for col, val in zip(df.columns, out):\n"," print(col, ':', round(val, 2))"]},{"cell_type":"markdown","metadata":{"id":"wnbcWY9C1W2l"},"source":["### Forecasting\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"xEScMhEk1W2l"},"outputs":[],"source":["# Get the lag order\n","lag_order = model_fitted.k_ar\n","print(lag_order)\n","\n","# Input 
data for forecasting\n","forecast_input = df_differenced.values[-lag_order:]\n","print(forecast_input)\n","\n","fc = model_fitted.forecast(y=forecast_input, steps=nobs)\n","df_forecast = pd.DataFrame(fc, index=df.index[-nobs:], columns=df.columns + '_1d')\n","df_forecast"]},{"cell_type":"markdown","metadata":{"id":"IPPCgS171W2l"},"source":["## Inversion of differencing\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"iRXtp-2S1W2l"},"outputs":[],"source":["def invert_transformation(df_train, df_forecast, second_diff=False):\n"," df_fc = df_forecast.copy()\n"," columns = df_train.columns\n"," for col in columns:\n"," # Roll back 2nd Diff\n"," if second_diff:\n"," df_fc[str(col)+'_1d'] = (df_train[col].iloc[-1]-df_train[col].iloc[-2]) + df_fc[str(col)+'_2d'].cumsum()\n"," # Roll back 1st Diff\n"," df_fc[str(col)+'_forecast'] = df_train[col].iloc[-1] + df_fc[str(col)+'_1d'].cumsum()\n"," return df_fc\n","\n","df_results = invert_transformation(df_train, df_forecast, second_diff=False)\n","df_results.loc[:, ['adjclose_forecast', 'neg_forecast', 'pos_forecast', 'neu_forecast']]"]},{"cell_type":"markdown","metadata":{"id":"Op0vgB-X1W2l"},"source":["## Plot Forcast\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"CWYiWQ4p1W2m"},"outputs":[],"source":["fig, axes = plt.subplots(nrows=len(df.columns), ncols=1, dpi=150, figsize=(10,10))\n","for i, (col,ax) in enumerate(zip(df.columns, axes.flatten())):\n"," df_results[col+'_forecast'].plot(legend=True, ax=ax).autoscale(axis='x',tight=True)\n"," df_test[col][-nobs:].plot(legend=True, ax=ax)\n"," ax.set_title(col + \": Forecast vs Actuals\")\n"," ax.xaxis.set_ticks_position('none')\n"," ax.yaxis.set_ticks_position('none')\n"," ax.spines[\"top\"].set_alpha(0)\n"," ax.tick_params(labelsize=6)\n","\n","plt.tight_layout()\n","plt.show()"]},{"cell_type":"markdown","metadata":{"id":"Jl6xEsxJ1W2m"},"source":["## Error of Forecast\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"AbY8Kh8i1W2m"},"outputs":[],"source":["def forecast_accuracy(forecast, actual):\n"," mape = np.mean(np.abs(forecast - actual)/np.abs(actual)) # MAPE\n"," me = np.mean(forecast - actual) # ME\n"," mae = np.mean(np.abs(forecast - actual)) # MAE\n"," mpe = np.mean((forecast - actual)/actual) # MPE\n"," rmse = np.mean((forecast - actual)**2)**.5 # RMSE\n"," corr = np.corrcoef(forecast, actual)[0,1] # corr\n"," mins = np.amin(np.hstack([forecast[:,None],\n"," actual[:,None]]), axis=1)\n"," maxs = np.amax(np.hstack([forecast[:,None],\n"," actual[:,None]]), axis=1)\n"," minmax = 1 - np.mean(mins/maxs) # minmax\n"," return({'mape':mape, 'me':me, 'mae': mae,\n"," 'mpe': mpe, 'rmse':rmse, 'corr':corr, 'minmax':minmax})\n","\n","print('Forecast Accuracy of: adjclose')\n","accuracy_prod = forecast_accuracy(df_results['adjclose_forecast'].values, df_test['adjclose'].values)\n","for k, v in accuracy_prod.items():\n"," print(k, ': ', round(v,4))\n","\n","print('\\nForecast Accuracy of: neg')\n","accuracy_prod = forecast_accuracy(df_results['neg_forecast'].values, df_test['neg'].values)\n","for k, v in accuracy_prod.items():\n"," print(k, ': ', round(v,4))\n","\n","print('\\nForecast Accuracy of: pos')\n","accuracy_prod = forecast_accuracy(df_results['pos_forecast'].values, df_test['pos'].values)\n","for k, v in accuracy_prod.items():\n"," print(k, ': ', round(v,4))\n","\n","print('\\nForecast Accuracy of: neu')\n","accuracy_prod = forecast_accuracy(df_results['neu_forecast'].values, df_test['neu'].values)\n","for k, v in accuracy_prod.items():\n"," 
print(k, ': ', round(v,4))"]}],"metadata":{"colab":{"provenance":[]},"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.8"}},"nbformat":4,"nbformat_minor":0}
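The ADA, BNB and ETH notebooks all follow the same workflow: first-difference the series, fit a VAR, forecast, then invert the differencing. As a compact reference, here is a hedged, self-contained sketch of that pipeline on synthetic data (the column names and the random-walk series are placeholders, not the committed datasets):

import numpy as np
import pandas as pd
from statsmodels.tsa.api import VAR

# Synthetic stand-ins for price and average sentiment: two correlated random walks.
rng = np.random.default_rng(0)
shocks = rng.normal(size=(200, 2)) @ np.array([[1.0, 0.4], [0.0, 1.0]])
df = pd.DataFrame(shocks.cumsum(axis=0), columns=["adjclose", "sentiment"])

nobs = 10                                  # hold out the last 10 observations, as in the notebooks
df_train, df_test = df[:-nobs], df[-nobs:]

df_differenced = df_train.diff().dropna()  # first difference to remove the unit root
model_fitted = VAR(df_differenced).fit(6)  # fixed lag order 6, mirroring the notebooks

fc = model_fitted.forecast(df_differenced.values[-model_fitted.k_ar:], steps=nobs)
df_forecast = pd.DataFrame(fc, index=df_test.index, columns=df.columns + "_1d")

# Invert the first difference: cumulative sum anchored at the last training value.
for col in df.columns:
    df_forecast[col + "_forecast"] = df_train[col].iloc[-1] + df_forecast[col + "_1d"].cumsum()
print(df_forecast.filter(like="_forecast").tail())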
notebooks/BNB-USD.xlsx ADDED
Binary file (144 kB).
 
notebooks/BNB.ipynb ADDED
@@ -0,0 +1,654 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "AzjuIqm8XkVM"
7
+ },
8
+ "source": [
9
+ "# BNB with Vector Autoregressive (VAR) model\n"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "markdown",
14
+ "metadata": {
15
+ "id": "wHEWczG-XkVO"
16
+ },
17
+ "source": [
18
+ "## Importing/Downloading all the libraries required"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": null,
24
+ "metadata": {
25
+ "id": "5MSWcYhxXkVO"
26
+ },
27
+ "outputs": [],
28
+ "source": [
29
+ "import re\n",
30
+ "import pandas as pd\n",
31
+ "import numpy as np\n",
32
+ "import matplotlib.pyplot as plt\n",
33
+ "from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n",
34
+ "from statsmodels.tsa.api import VAR\n",
35
+ "from statsmodels.tsa.stattools import adfuller\n",
36
+ "from statsmodels.tsa.stattools import grangercausalitytests\n",
37
+ "from statsmodels.tsa.vector_ar.vecm import coint_johansen\n",
38
+ "from statsmodels.stats.stattools import durbin_watson"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "markdown",
43
+ "metadata": {
44
+ "id": "xMBiAqfnXkVP"
45
+ },
46
+ "source": [
47
+ "## Data Preprocessing\n"
48
+ ]
49
+ },
50
+ {
51
+ "cell_type": "markdown",
52
+ "metadata": {
53
+ "id": "72b3-smHXkVQ"
54
+ },
55
+ "source": [
56
+ "### Importing and summarizing the datasets"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": null,
62
+ "metadata": {
63
+ "id": "BkOzHFqYXkVQ"
64
+ },
65
+ "outputs": [],
66
+ "source": [
67
+ "sentimentdf = pd.read_parquet(\"hf://datasets/tmotagam/Cryptocurrencies-sentiment-from-X/BNB-sentiment-dataset.parquet\")\n",
68
+ "sentimentdf.drop('id', axis=1, inplace=True)\n",
69
+ "sentimentdf.set_index('date', inplace=True)\n",
70
+ "bnbdf = pd.read_excel('BNB-USD.xlsx', parse_dates=['timestamp'], index_col=0)\n",
71
+ "print('====================================================================================')\n",
72
+ "print('BNB Sentiment Summary:')\n",
73
+ "print(sentimentdf.describe())\n",
74
+ "print('====================================================================================')\n",
75
+ "print('BNB Sentiment Data:')\n",
76
+ "print(sentimentdf.tail())\n",
77
+ "print('====================================================================================')\n",
78
+ "print('BNB Price Summary:')\n",
79
+ "print(bnbdf.describe())\n",
80
+ "print('====================================================================================')\n",
81
+ "print('BNB Price Data:')\n",
82
+ "print(bnbdf.tail())\n",
83
+ "print('====================================================================================')"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "markdown",
88
+ "metadata": {
89
+ "id": "WwondoY4Yxb9"
90
+ },
91
+ "source": [
92
+ "### Removing duplicate and unwanted data points, columns from the datasets"
93
+ ]
94
+ },
95
+ {
96
+ "cell_type": "code",
97
+ "execution_count": null,
98
+ "metadata": {
99
+ "id": "i5ZqpNPoYz_1"
100
+ },
101
+ "outputs": [],
102
+ "source": [
103
+ "sentimentdf['tmpdate'] = sentimentdf.index\n",
104
+ "date_ids = sentimentdf['tmpdate'].unique()\n",
105
+ "for date in date_ids:\n",
106
+ " tmpdf = sentimentdf[sentimentdf['tmpdate'] == date]\n",
107
+ " tmpdf = tmpdf.drop_duplicates()\n",
108
+ " sentimentdf = pd.concat([sentimentdf, tmpdf]).drop_duplicates()\n",
109
+ "sentimentdf = sentimentdf.drop('tmpdate', axis=1)\n",
110
+ "bnbdf.drop(['low', 'open', 'volume', 'close', 'high'], axis=1, inplace=True)\n",
111
+ "bnbdf = bnbdf.loc['2021-12-29':]\n",
112
+ "print('====================================================================================')\n",
113
+ "print('BNB Sentiment Summary:')\n",
114
+ "print(sentimentdf.describe())\n",
115
+ "print('====================================================================================')\n",
116
+ "print('BNB Sentiment Data:')\n",
117
+ "print(sentimentdf.head())\n",
118
+ "print('====================================================================================')\n",
119
+ "print('BNB Price Summary:')\n",
120
+ "print(bnbdf.describe())\n",
121
+ "print('====================================================================================')\n",
122
+ "print('BNB Price Data:')\n",
123
+ "print(bnbdf.head())\n",
124
+ "print('====================================================================================')"
125
+ ]
126
+ },
127
+ {
128
+ "cell_type": "markdown",
129
+ "metadata": {
130
+ "id": "xOTtORZwZIxE"
131
+ },
132
+ "source": [
133
+ "### Getting sentiment scores and their averages using VADER"
134
+ ]
135
+ },
136
+ {
137
+ "cell_type": "code",
138
+ "execution_count": null,
139
+ "metadata": {
140
+ "id": "QzmxUlXCZJjE"
141
+ },
142
+ "outputs": [],
143
+ "source": [
144
+ "analyzer = SentimentIntensityAnalyzer()\n",
145
+ "sentimentdf['neg'] = [analyzer.polarity_scores(re.sub(r\"(@[A-Za-z0-9_]+)|[^\\w\\s]|#|http\\S+\", \"\", x.replace(\"\\n\",\" \")))['neg'] for x in sentimentdf['content']]\n",
146
+ "sentimentdf['pos'] = [analyzer.polarity_scores(re.sub(r\"(@[A-Za-z0-9_]+)|[^\\w\\s]|#|http\\S+\", \"\", x.replace(\"\\n\",\" \")))['pos'] for x in sentimentdf['content']]\n",
147
+ "sentimentdf['neu'] = [analyzer.polarity_scores(re.sub(r\"(@[A-Za-z0-9_]+)|[^\\w\\s]|#|http\\S+\", \"\", x.replace(\"\\n\",\" \")))['neu'] for x in sentimentdf['content']]\n",
148
+ "sentimentdf.drop(['content'], axis=1, inplace=True)\n",
149
+ "df_grouped = sentimentdf.groupby(sentimentdf.index.date)\n",
150
+ "averages = df_grouped.apply(lambda x: np.sum(x, axis=0) / x.shape[0])\n",
151
+ "averages_reshape = np.vstack(averages.values)\n",
152
+ "df_averages = pd.DataFrame(averages_reshape, index=averages.index, columns=sentimentdf.columns)\n",
153
+ "print('====================================================================================')\n",
154
+ "print('BNB Sentiment Summary:')\n",
155
+ "print(df_averages.describe())\n",
156
+ "print('====================================================================================')\n",
157
+ "print('BNB Sentiment Data:')\n",
158
+ "print(df_averages.tail())\n",
159
+ "print('====================================================================================')"
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "markdown",
164
+ "metadata": {
165
+ "id": "DuqriTWvZQ6L"
166
+ },
167
+ "source": [
168
+ "### Combining the two datasets"
169
+ ]
170
+ },
171
+ {
172
+ "cell_type": "code",
173
+ "execution_count": null,
174
+ "metadata": {
175
+ "id": "JGA0mmRWZR37"
176
+ },
177
+ "outputs": [],
178
+ "source": [
179
+ "df = bnbdf.assign(neg=df_averages['neg'], pos=df_averages['pos'], neu=df_averages['neu'])\n",
180
+ "print('====================================================================================')\n",
181
+ "print('Summary:')\n",
182
+ "print(df.describe())\n",
183
+ "print('====================================================================================')\n",
184
+ "print('Data:')\n",
185
+ "print(df.head())\n",
186
+ "print('====================================================================================')"
187
+ ]
188
+ },
189
+ {
190
+ "cell_type": "markdown",
191
+ "metadata": {
192
+ "id": "tUYrxTjYXkVQ"
193
+ },
194
+ "source": [
195
+ "### Plotting the dataset\n"
196
+ ]
197
+ },
198
+ {
199
+ "cell_type": "code",
200
+ "execution_count": null,
201
+ "metadata": {
202
+ "id": "CJSJRPy6XkVQ"
203
+ },
204
+ "outputs": [],
205
+ "source": [
206
+ "fig, axes = plt.subplots(nrows=4, ncols=1, dpi=120, figsize=(10,6))\n",
207
+ "for i, ax in enumerate(axes.flatten()):\n",
208
+ " data = df[df.columns[i]]\n",
209
+ " ax.plot(data, color='red', linewidth=1)\n",
210
+ " ax.set_title(df.columns[i])\n",
211
+ " ax.xaxis.set_ticks_position('none')\n",
212
+ " ax.yaxis.set_ticks_position('none')\n",
213
+ " ax.spines[\"top\"].set_alpha(0)\n",
214
+ " ax.tick_params(labelsize=6)\n",
215
+ "\n",
216
+ "plt.tight_layout()\n",
217
+ "plt.show()"
218
+ ]
219
+ },
220
+ {
221
+ "cell_type": "markdown",
222
+ "metadata": {
223
+ "id": "01lmepp2XkVR"
224
+ },
225
+ "source": [
226
+ "### Granger Causality Test\n",
227
+ "\n",
228
+ "The Granger causality test is run for every pairwise combination of the time series.\n",
229
+ "The rows are the response variables and the columns are the predictors. The values in the table\n",
230
+ "are the p-values. A p-value below the significance level (0.05) means that the\n",
231
+ "null hypothesis that the coefficients of the corresponding past values are\n",
232
+ "zero, i.e. that X does not Granger-cause Y, can be rejected.\n"
233
+ ]
234
+ },
235
+ {
236
+ "cell_type": "code",
237
+ "execution_count": null,
238
+ "metadata": {
239
+ "id": "W2kCxKeiXkVR"
240
+ },
241
+ "outputs": [],
242
+ "source": [
243
+ "maxlag=12\n",
244
+ "test = 'ssr_chi2test'\n",
245
+ "def grangers_causation_matrix(data, variables, test='ssr_chi2test', verbose=False):\n",
246
+ " df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables)\n",
247
+ " for c in df.columns:\n",
248
+ " for r in df.index:\n",
249
+ " test_result = grangercausalitytests(data[[r, c]], maxlag=maxlag)\n",
250
+ " p_values = [round(test_result[i+1][0][test][1],4) for i in range(maxlag)]\n",
251
+ " min_p_value = np.min(p_values)\n",
252
+ " df.loc[r, c] = min_p_value\n",
253
+ " df.columns = [var + '_x' for var in variables]\n",
254
+ " df.index = [var + '_y' for var in variables]\n",
255
+ " return df\n",
256
+ "\n",
257
+ "grangers_causation_matrix(df, variables = df.columns)"
258
+ ]
259
+ },
260
+ {
261
+ "cell_type": "markdown",
262
+ "metadata": {
263
+ "id": "Isb48EmCXkVR"
264
+ },
265
+ "source": [
266
+ "### Johansen's Cointegration Test\n",
267
+ "\n",
268
+ "The Johansen test, named after Søren Johansen, is a procedure for testing cointegration of several, say k, I(1) time series.\n",
269
+ "This test permits more than one cointegrating relationship, so it is more generally applicable than the Engle–Granger test, which is based on the Dickey–Fuller (or augmented Dickey–Fuller) test for unit roots in the residuals from a single (estimated) cointegrating relationship.\n"
270
+ ]
271
+ },
272
+ {
273
+ "cell_type": "code",
274
+ "execution_count": null,
275
+ "metadata": {
276
+ "id": "s_X5RDSnXkVS"
277
+ },
278
+ "outputs": [],
279
+ "source": [
280
+ "def cointegration_test(df, alpha=0.05):\n",
281
+ " out = coint_johansen(df,-1,5)\n",
282
+ " d = {'0.90':0, '0.95':1, '0.99':2}\n",
283
+ " traces = out.lr1\n",
284
+ " cvts = out.cvt[:, d[str(1-alpha)]]\n",
285
+ " def adjust(val, length= 6): return str(val).ljust(length)\n",
286
+ "\n",
287
+ " # Summary\n",
288
+ " print('Name :: Test Stat > C(95%) => Signif \\n', '--'*20)\n",
289
+ " for col, trace, cvt in zip(df.columns, traces, cvts):\n",
290
+ " print(adjust(col), ':: ', adjust(round(trace,2), 9), \">\", adjust(cvt, 8), ' => ' , trace > cvt)\n",
291
+ "\n",
292
+ "cointegration_test(df)"
293
+ ]
294
+ },
295
+ {
296
+ "cell_type": "markdown",
297
+ "metadata": {
298
+ "id": "TavSdejEXkVS"
299
+ },
300
+ "source": [
301
+ "### Train and Test Split\n"
302
+ ]
303
+ },
304
+ {
305
+ "cell_type": "code",
306
+ "execution_count": null,
307
+ "metadata": {
308
+ "id": "inkc6vy_XkVS"
309
+ },
310
+ "outputs": [],
311
+ "source": [
312
+ "nobs = 10 # number of observations to be forecasted\n",
313
+ "df_train, df_test = df[0:-nobs], df[-nobs:]\n",
314
+ "\n",
315
+ "print(df_train.shape)\n",
316
+ "print(df_test.shape)"
317
+ ]
318
+ },
319
+ {
320
+ "cell_type": "markdown",
321
+ "metadata": {
322
+ "id": "3PIWMeTyXkVS"
323
+ },
324
+ "source": [
325
+ "### ADF test for stationarity of each series\n",
326
+ "\n",
327
+ "An augmented Dickey–Fuller (ADF) test tests the null hypothesis that a unit root is present in a time series sample.\n",
328
+ "The alternative hypothesis is different depending on which version of the test is used, but is usually stationarity or trend-stationarity.\n",
329
+ "It is an augmented version of the Dickey–Fuller test for a larger and more complicated set of time series models.\n",
330
+ "\n",
331
+ "The augmented Dickey–Fuller (ADF) statistic, used in the test, is a negative number.\n",
332
+ "The more negative it is, the stronger the rejection of the hypothesis that there is a unit root at some level of confidence.\n"
333
+ ]
334
+ },
335
+ {
336
+ "cell_type": "code",
337
+ "execution_count": null,
338
+ "metadata": {
339
+ "id": "oEJRTZq0XkVT"
340
+ },
341
+ "outputs": [],
342
+ "source": [
343
+ "def adfuller_test(series,name, signif=0.05, verbose=False):\n",
344
+ " r = adfuller(series, autolag='AIC')\n",
345
+ " output = {'test_statistic':round(r[0], 4), 'pvalue':round(r[1], 4), 'n_lags':round(r[2], 4), 'n_obs':r[3]}\n",
346
+ " p_value = output['pvalue']\n",
347
+ " def adjust(val, length= 6): return str(val).ljust(length)\n",
348
+ "\n",
349
+ " print(f' Augmented Dickey-Fuller Test on \"{name}\"', \"\\n \", '-'*47)\n",
350
+ " print(f' Null Hypothesis: Data has unit root. Non-Stationary.')\n",
351
+ " print(f' Significance Level = {signif}')\n",
352
+ " print(f' Test Statistic = {output[\"test_statistic\"]}')\n",
353
+ " print(f' No. Lags Chosen = {output[\"n_lags\"]}')\n",
354
+ "\n",
355
+ " for key,val in r[4].items():\n",
356
+ " print(f' Critical value {adjust(key)} = {round(val, 3)}')\n",
357
+ "\n",
358
+ " if p_value <= signif:\n",
359
+ " print(f\" => P-Value = {p_value}. Rejecting Null Hypothesis.\")\n",
360
+ " print(f\" => Series is Stationary.\")\n",
361
+ " else:\n",
362
+ " print(f\" => P-Value = {p_value}. Weak evidence to reject the Null Hypothesis.\")\n",
363
+ " print(f\" => Series is Non-Stationary.\")\n",
364
+ "\n",
365
+ "for name, column in df_train.items():\n",
366
+ " adfuller_test(column, name=name)\n",
367
+ " print('\\n')"
368
+ ]
369
+ },
370
+ {
371
+ "cell_type": "markdown",
372
+ "metadata": {
373
+ "id": "LQtBiTbtXkVT"
374
+ },
375
+ "source": [
376
+ "### Since the series are non-stationary, we difference them and run the ADF test again\n"
377
+ ]
378
+ },
379
+ {
380
+ "cell_type": "code",
381
+ "execution_count": null,
382
+ "metadata": {
383
+ "id": "896PtNX7XkVT"
384
+ },
385
+ "outputs": [],
386
+ "source": [
387
+ "df_differenced = df_train.diff().dropna()"
388
+ ]
389
+ },
390
+ {
391
+ "cell_type": "code",
392
+ "execution_count": null,
393
+ "metadata": {
394
+ "id": "TpxJOgXQXkVT"
395
+ },
396
+ "outputs": [],
397
+ "source": [
398
+ "for name, column in df_differenced.items():\n",
399
+ " adfuller_test(column, name=name)\n",
400
+ " print('\\n')"
401
+ ]
402
+ },
403
+ {
404
+ "cell_type": "markdown",
405
+ "metadata": {
406
+ "id": "TI5b4acLXkVT"
407
+ },
408
+ "source": [
409
+ "### Selecting Lag Order (p) for VAR model\n"
410
+ ]
411
+ },
412
+ {
413
+ "cell_type": "code",
414
+ "execution_count": null,
415
+ "metadata": {
416
+ "id": "_PMFSk73XkVU"
417
+ },
418
+ "outputs": [],
419
+ "source": [
420
+ "model = VAR(df_differenced)\n",
421
+ "for i in [1,2,3,4,5,6,7,8,9]:\n",
422
+ " result = model.fit(i)\n",
423
+ " print('Lag Order =', i)\n",
424
+ " print('AIC : ', result.aic)\n",
425
+ " print('BIC : ', result.bic)\n",
426
+ " print('FPE : ', result.fpe)\n",
427
+ " print('HQIC: ', result.hqic, '\\n')\n",
428
+ "\n",
429
+ "x = model.select_order(maxlags=12)\n",
430
+ "x.summary()"
431
+ ]
432
+ },
433
+ {
434
+ "cell_type": "markdown",
435
+ "metadata": {
436
+ "id": "PEIcc2ijXkVU"
437
+ },
438
+ "source": [
439
+ "## Model Training\n"
440
+ ]
441
+ },
442
+ {
443
+ "cell_type": "code",
444
+ "execution_count": null,
445
+ "metadata": {
446
+ "id": "n4JAkfpZXkVU"
447
+ },
448
+ "outputs": [],
449
+ "source": [
450
+ "model_fitted = model.fit(6)\n",
451
+ "model_fitted.summary()"
452
+ ]
453
+ },
454
+ {
455
+ "cell_type": "markdown",
456
+ "metadata": {
457
+ "id": "GnKuEG78XkVU"
458
+ },
459
+ "source": [
460
+ "## Durbin Watson Test\n",
461
+ "\n",
462
+ "The Durbin–Watson statistic is a test statistic used to detect the presence of autocorrelation at lag 1 in the residuals (prediction errors) from a regression analysis.\n",
463
+ "It is named after James Durbin and Geoffrey Watson.\n",
464
+ "The small sample distribution of this ratio was derived by John von Neumann (von Neumann, 1941).\n",
465
+ "Durbin and Watson (1950, 1951) applied this statistic to the residuals from least squares regressions, and developed bounds tests for the null hypothesis that the errors are serially uncorrelated against the alternative that they follow a first order autoregressive process.\n",
466
+ "Note that the distribution of this test statistic does not depend on the estimated regression coefficients and the variance of the errors.\n"
467
+ ]
468
+ },
469
+ {
470
+ "cell_type": "code",
471
+ "execution_count": null,
472
+ "metadata": {
473
+ "id": "hUAiciYpXkVU"
474
+ },
475
+ "outputs": [],
476
+ "source": [
477
+ "out = durbin_watson(model_fitted.resid)\n",
478
+ "\n",
479
+ "for col, val in zip(df.columns, out):\n",
480
+ " print(col, ':', round(val, 2))"
481
+ ]
482
+ },
483
+ {
484
+ "cell_type": "markdown",
485
+ "metadata": {
486
+ "id": "NxbohO3eXkVU"
487
+ },
488
+ "source": [
489
+ "### Forecasting\n"
490
+ ]
491
+ },
492
+ {
493
+ "cell_type": "code",
494
+ "execution_count": null,
495
+ "metadata": {
496
+ "id": "jrcwq_mcXkVU"
497
+ },
498
+ "outputs": [],
499
+ "source": [
500
+ "# Get the lag order\n",
501
+ "lag_order = model_fitted.k_ar\n",
502
+ "print(lag_order)\n",
503
+ "\n",
504
+ "# Input data for forecasting\n",
505
+ "forecast_input = df_differenced.values[-lag_order:]\n",
506
+ "print(forecast_input)\n",
507
+ "\n",
508
+ "fc = model_fitted.forecast(y=forecast_input, steps=nobs)\n",
509
+ "df_forecast = pd.DataFrame(fc, index=df.index[-nobs:], columns=df.columns + '_1d')\n",
510
+ "df_forecast"
511
+ ]
512
+ },
513
+ {
514
+ "cell_type": "markdown",
515
+ "metadata": {
516
+ "id": "-35abzN2XkVV"
517
+ },
518
+ "source": [
519
+ "## Inverting the differencing\n"
520
+ ]
521
+ },
522
+ {
523
+ "cell_type": "code",
524
+ "execution_count": null,
525
+ "metadata": {
526
+ "id": "2-720iuHXkVV"
527
+ },
528
+ "outputs": [],
529
+ "source": [
530
+ "def invert_transformation(df_train, df_forecast, second_diff=False):\n",
531
+ " df_fc = df_forecast.copy()\n",
532
+ " columns = df_train.columns\n",
533
+ " for col in columns:\n",
534
+ " # Roll back 2nd Diff\n",
535
+ " if second_diff:\n",
536
+ " df_fc[str(col)+'_1d'] = (df_train[col].iloc[-1]-df_train[col].iloc[-2]) + df_fc[str(col)+'_2d'].cumsum()\n",
537
+ " # Roll back 1st Diff\n",
538
+ " df_fc[str(col)+'_forecast'] = df_train[col].iloc[-1] + df_fc[str(col)+'_1d'].cumsum()\n",
539
+ " return df_fc\n",
540
+ "\n",
541
+ "df_results = invert_transformation(df_train, df_forecast, second_diff=False)\n",
542
+ "df_results.loc[:, ['adjclose_forecast', 'neg_forecast', 'pos_forecast', 'neu_forecast']]"
543
+ ]
544
+ },
545
+ {
546
+ "cell_type": "markdown",
547
+ "metadata": {
548
+ "id": "A1Gt4mpuXkVV"
549
+ },
550
+ "source": [
551
+ "## Plot Forecast\n"
552
+ ]
553
+ },
554
+ {
555
+ "cell_type": "code",
556
+ "execution_count": null,
557
+ "metadata": {
558
+ "id": "bXvGwhA1XkVV"
559
+ },
560
+ "outputs": [],
561
+ "source": [
562
+ "fig, axes = plt.subplots(nrows=len(df.columns), ncols=1, dpi=150, figsize=(10,10))\n",
563
+ "for i, (col,ax) in enumerate(zip(df.columns, axes.flatten())):\n",
564
+ " df_results[col+'_forecast'].plot(legend=True, ax=ax).autoscale(axis='x',tight=True)\n",
565
+ " df_test[col][-nobs:].plot(legend=True, ax=ax)\n",
566
+ " ax.set_title(col + \": Forecast vs Actuals\")\n",
567
+ " ax.xaxis.set_ticks_position('none')\n",
568
+ " ax.yaxis.set_ticks_position('none')\n",
569
+ " ax.spines[\"top\"].set_alpha(0)\n",
570
+ " ax.tick_params(labelsize=6)\n",
571
+ "\n",
572
+ "plt.tight_layout()\n",
573
+ "plt.show()"
574
+ ]
575
+ },
576
+ {
577
+ "cell_type": "markdown",
578
+ "metadata": {
579
+ "id": "mPqMcnO9XkVV"
580
+ },
581
+ "source": [
582
+ "## Error of Forecast\n"
583
+ ]
584
+ },
585
+ {
586
+ "cell_type": "code",
587
+ "execution_count": null,
588
+ "metadata": {
589
+ "id": "_ayTmFeTXkVV"
590
+ },
591
+ "outputs": [],
592
+ "source": [
593
+ "def forecast_accuracy(forecast, actual):\n",
594
+ " mape = np.mean(np.abs(forecast - actual)/np.abs(actual)) # MAPE\n",
595
+ " me = np.mean(forecast - actual) # ME\n",
596
+ " mae = np.mean(np.abs(forecast - actual)) # MAE\n",
597
+ " mpe = np.mean((forecast - actual)/actual) # MPE\n",
598
+ " rmse = np.mean((forecast - actual)**2)**.5 # RMSE\n",
599
+ " corr = np.corrcoef(forecast, actual)[0,1] # corr\n",
600
+ " mins = np.amin(np.hstack([forecast[:,None],\n",
601
+ " actual[:,None]]), axis=1)\n",
602
+ " maxs = np.amax(np.hstack([forecast[:,None],\n",
603
+ " actual[:,None]]), axis=1)\n",
604
+ " minmax = 1 - np.mean(mins/maxs) # minmax\n",
605
+ " return({'mape':mape, 'me':me, 'mae': mae,\n",
606
+ " 'mpe': mpe, 'rmse':rmse, 'corr':corr, 'minmax':minmax})\n",
607
+ "\n",
608
+ "print('Forecast Accuracy of: adjclose')\n",
609
+ "accuracy_prod = forecast_accuracy(df_results['adjclose_forecast'].values, df_test['adjclose'].values)\n",
610
+ "for k, v in accuracy_prod.items():\n",
611
+ " print(k, ': ', round(v,4))\n",
612
+ "\n",
613
+ "print('\\nForecast Accuracy of: pos')\n",
614
+ "accuracy_prod = forecast_accuracy(df_results['pos_forecast'].values, df_test['pos'].values)\n",
615
+ "for k, v in accuracy_prod.items():\n",
616
+ " print(k, ': ', round(v,4))\n",
617
+ "\n",
618
+ "print('\\nForecast Accuracy of: neg')\n",
619
+ "accuracy_prod = forecast_accuracy(df_results['neg_forecast'].values, df_test['neg'].values)\n",
620
+ "for k, v in accuracy_prod.items():\n",
621
+ " print(k, ': ', round(v,4))\n",
622
+ "\n",
623
+ "print('\\nForecast Accuracy of: neu')\n",
624
+ "accuracy_prod = forecast_accuracy(df_results['neu_forecast'].values, df_test['neu'].values)\n",
625
+ "for k, v in accuracy_prod.items():\n",
626
+ " print(k, ': ', round(v,4))"
627
+ ]
628
+ }
629
+ ],
630
+ "metadata": {
631
+ "colab": {
632
+ "provenance": []
633
+ },
634
+ "kernelspec": {
635
+ "display_name": "Python 3",
636
+ "language": "python",
637
+ "name": "python3"
638
+ },
639
+ "language_info": {
640
+ "codemirror_mode": {
641
+ "name": "ipython",
642
+ "version": 3
643
+ },
644
+ "file_extension": ".py",
645
+ "mimetype": "text/x-python",
646
+ "name": "python",
647
+ "nbconvert_exporter": "python",
648
+ "pygments_lexer": "ipython3",
649
+ "version": "3.11.8"
650
+ }
651
+ },
652
+ "nbformat": 4,
653
+ "nbformat_minor": 0
654
+ }
notebooks/ETH-USD.xlsx ADDED
Binary file (138 kB).
 
notebooks/ETH.ipynb ADDED
@@ -0,0 +1,654 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "5cWJcqtEjnbd"
7
+ },
8
+ "source": [
9
+ "# ETH with Vector Autoregressive (VAR) model\n"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "markdown",
14
+ "metadata": {
15
+ "id": "sOQKeFpujnbf"
16
+ },
17
+ "source": [
18
+ "## Importing/Downloading all the libraries required\n"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": null,
24
+ "metadata": {
25
+ "id": "xjrODJyZjnbg"
26
+ },
27
+ "outputs": [],
28
+ "source": [
29
+ "import re\n",
30
+ "import pandas as pd\n",
31
+ "import numpy as np\n",
32
+ "import matplotlib.pyplot as plt\n",
33
+ "from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n",
34
+ "from statsmodels.tsa.api import VAR\n",
35
+ "from statsmodels.tsa.stattools import adfuller\n",
36
+ "from statsmodels.tsa.stattools import grangercausalitytests\n",
37
+ "from statsmodels.tsa.vector_ar.vecm import coint_johansen\n",
38
+ "from statsmodels.stats.stattools import durbin_watson"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "markdown",
43
+ "metadata": {
44
+ "id": "KjO6Yh6Mjnbh"
45
+ },
46
+ "source": [
47
+ "## Data Preprocessing\n"
48
+ ]
49
+ },
50
+ {
51
+ "cell_type": "markdown",
52
+ "metadata": {
53
+ "id": "WGkqWrFNjnbh"
54
+ },
55
+ "source": [
56
+ "### Importing and summarizing the datasets\n"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": null,
62
+ "metadata": {
63
+ "id": "2LBKAyZ1jnbh"
64
+ },
65
+ "outputs": [],
66
+ "source": [
67
+ "sentimentdf = pd.read_parquet(\"hf://datasets/tmotagam/Cryptocurrencies-sentiment-from-X/ETH-sentiment-dataset.parquet\")\n",
68
+ "sentimentdf.drop('id', axis=1, inplace=True)\n",
69
+ "sentimentdf.set_index('date', inplace=True)\n",
70
+ "ethdf = pd.read_excel('ETH-USD.xlsx', parse_dates=['timestamp'], index_col=0)\n",
71
+ "print('====================================================================================')\n",
72
+ "print('ETH Sentiment Summary:')\n",
73
+ "print(sentimentdf.describe())\n",
74
+ "print('====================================================================================')\n",
75
+ "print('ETH Sentiment Data:')\n",
76
+ "print(sentimentdf.tail())\n",
77
+ "print('====================================================================================')\n",
78
+ "print('ETH Price Summary:')\n",
79
+ "print(ethdf.describe())\n",
80
+ "print('====================================================================================')\n",
81
+ "print('ETH Price Data:')\n",
82
+ "print(ethdf.tail())\n",
83
+ "print('====================================================================================')"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "markdown",
88
+ "metadata": {
89
+ "id": "iPGJYM6sxcRZ"
90
+ },
91
+ "source": [
92
+ "### Removing duplicate and unwanted data points, columns from the datasets"
93
+ ]
94
+ },
95
+ {
96
+ "cell_type": "code",
97
+ "execution_count": null,
98
+ "metadata": {
99
+ "id": "e2BROPqpxjIq"
100
+ },
101
+ "outputs": [],
102
+ "source": [
103
+ "sentimentdf['tmpdate'] = sentimentdf.index\n",
104
+ "date_ids = sentimentdf['tmpdate'].unique()\n",
105
+ "for date in date_ids:\n",
106
+ " tmpdf = sentimentdf[sentimentdf['tmpdate'] == date]\n",
107
+ " tmpdf = tmpdf.drop_duplicates()\n",
108
+ " sentimentdf = pd.concat([sentimentdf, tmpdf]).drop_duplicates()\n",
109
+ "sentimentdf = sentimentdf.drop('tmpdate', axis=1)\n",
110
+ "ethdf.drop(['low', 'open', 'volume', 'close', 'high'], axis=1, inplace=True)\n",
111
+ "ethdf = ethdf.loc['2021-12-29':]\n",
112
+ "print('====================================================================================')\n",
113
+ "print('ETH Sentiment Summary:')\n",
114
+ "print(sentimentdf.describe())\n",
115
+ "print('====================================================================================')\n",
116
+ "print('ETH Sentiment Data:')\n",
117
+ "print(sentimentdf.head())\n",
118
+ "print('====================================================================================')\n",
119
+ "print('ETH Price Summary:')\n",
120
+ "print(ethdf.describe())\n",
121
+ "print('====================================================================================')\n",
122
+ "print('ETH Price Data:')\n",
123
+ "print(ethdf.head())\n",
124
+ "print('====================================================================================')"
125
+ ]
126
+ },
127
+ {
128
+ "cell_type": "markdown",
129
+ "metadata": {
130
+ "id": "N4YKJdyONEp3"
131
+ },
132
+ "source": [
133
+ "### Getting sentiment scores and their averages using VADER"
134
+ ]
135
+ },
136
+ {
137
+ "cell_type": "code",
138
+ "execution_count": null,
139
+ "metadata": {
140
+ "id": "YCJQoAh5NYQ3"
141
+ },
142
+ "outputs": [],
143
+ "source": [
144
+ "analyzer = SentimentIntensityAnalyzer()\n",
145
+ "sentimentdf['neg'] = [analyzer.polarity_scores(re.sub(r\"(@[A-Za-z0-9_]+)|[^\\w\\s]|#|http\\S+\", \"\", x.replace(\"\\n\",\" \")))['neg'] for x in sentimentdf['content']]\n",
146
+ "sentimentdf['pos'] = [analyzer.polarity_scores(re.sub(r\"(@[A-Za-z0-9_]+)|[^\\w\\s]|#|http\\S+\", \"\", x.replace(\"\\n\",\" \")))['pos'] for x in sentimentdf['content']]\n",
147
+ "sentimentdf['neu'] = [analyzer.polarity_scores(re.sub(r\"(@[A-Za-z0-9_]+)|[^\\w\\s]|#|http\\S+\", \"\", x.replace(\"\\n\",\" \")))['neu'] for x in sentimentdf['content']]\n",
148
+ "sentimentdf.drop(['content'], axis=1, inplace=True)\n",
149
+ "df_grouped = sentimentdf.groupby(sentimentdf.index.date)\n",
150
+ "averages = df_grouped.apply(lambda x: np.sum(x, axis=0) / x.shape[0])\n",
151
+ "averages_reshape = np.vstack(averages.values)\n",
152
+ "df_averages = pd.DataFrame(averages_reshape, index=averages.index, columns=sentimentdf.columns)\n",
153
+ "print('====================================================================================')\n",
154
+ "print('ETH Sentiment Summary:')\n",
155
+ "print(df_averages.describe())\n",
156
+ "print('====================================================================================')\n",
157
+ "print('ETH Sentiment Data:')\n",
158
+ "print(df_averages.head())\n",
159
+ "print('====================================================================================')"
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "markdown",
164
+ "metadata": {
165
+ "id": "8dcH6IsPa1Sc"
166
+ },
167
+ "source": [
168
+ "### Combining the two datasets"
169
+ ]
170
+ },
171
+ {
172
+ "cell_type": "code",
173
+ "execution_count": null,
174
+ "metadata": {
175
+ "id": "PiX5pqoSa5Pe"
176
+ },
177
+ "outputs": [],
178
+ "source": [
179
+ "df = ethdf.assign(neg=df_averages['neg'], pos=df_averages['pos'], neu=df_averages['neu'])\n",
180
+ "print('====================================================================================')\n",
181
+ "print('Summary:')\n",
182
+ "print(df.describe())\n",
183
+ "print('====================================================================================')\n",
184
+ "print('Data:')\n",
185
+ "print(df.head())\n",
186
+ "print('====================================================================================')"
187
+ ]
188
+ },
189
+ {
190
+ "cell_type": "markdown",
191
+ "metadata": {
192
+ "id": "UNvMzkrFjnbi"
193
+ },
194
+ "source": [
195
+ "### Plotting the dataset\n"
196
+ ]
197
+ },
198
+ {
199
+ "cell_type": "code",
200
+ "execution_count": null,
201
+ "metadata": {
202
+ "id": "BLQK4ZLkjnbi"
203
+ },
204
+ "outputs": [],
205
+ "source": [
206
+ "fig, axes = plt.subplots(nrows=4, ncols=1, dpi=120, figsize=(10,6))\n",
207
+ "for i, ax in enumerate(axes.flatten()):\n",
208
+ " data = df[df.columns[i]]\n",
209
+ " ax.plot(data, color='red', linewidth=1)\n",
210
+ " ax.set_title(df.columns[i])\n",
211
+ " ax.xaxis.set_ticks_position('none')\n",
212
+ " ax.yaxis.set_ticks_position('none')\n",
213
+ " ax.spines[\"top\"].set_alpha(0)\n",
214
+ " ax.tick_params(labelsize=6)\n",
215
+ "\n",
216
+ "plt.tight_layout()\n",
217
+ "plt.show()"
218
+ ]
219
+ },
220
+ {
221
+ "cell_type": "markdown",
222
+ "metadata": {
223
+ "id": "tzrqJ76vjnbi"
224
+ },
225
+ "source": [
226
+ "### Granger Causality Test\n",
227
+ "\n",
228
+ "The Granger causality test is run for every pairwise combination of the time series.\n",
229
+ "The rows are the response variables and the columns are the predictors. The values in the table\n",
230
+ "are the p-values. A p-value below the significance level (0.05) means that the\n",
231
+ "null hypothesis that the coefficients of the corresponding past values are\n",
232
+ "zero, i.e. that X does not Granger-cause Y, can be rejected.\n"
233
+ ]
234
+ },
235
+ {
236
+ "cell_type": "code",
237
+ "execution_count": null,
238
+ "metadata": {
239
+ "id": "9pEuAjX_jnbi"
240
+ },
241
+ "outputs": [],
242
+ "source": [
243
+ "maxlag=12\n",
244
+ "test = 'ssr_chi2test'\n",
245
+ "def grangers_causation_matrix(data, variables, test='ssr_chi2test', verbose=False):\n",
246
+ " df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables)\n",
247
+ " for c in df.columns:\n",
248
+ " for r in df.index:\n",
249
+ " test_result = grangercausalitytests(data[[r, c]], maxlag=maxlag)\n",
250
+ " p_values = [round(test_result[i+1][0][test][1],4) for i in range(maxlag)]\n",
251
+ " min_p_value = np.min(p_values)\n",
252
+ " df.loc[r, c] = min_p_value\n",
253
+ " df.columns = [var + '_x' for var in variables]\n",
254
+ " df.index = [var + '_y' for var in variables]\n",
255
+ " return df\n",
256
+ "\n",
257
+ "grangers_causation_matrix(df, variables = df.columns)"
258
+ ]
259
+ },
260
+ {
261
+ "cell_type": "markdown",
262
+ "metadata": {
263
+ "id": "L-bLcFUmjnbj"
264
+ },
265
+ "source": [
266
+ "### Johansen's Cointegration Test\n",
267
+ "\n",
268
+ "The Johansen test, named after Søren Johansen, is a procedure for testing cointegration of several, say k, I(1) time series.\n",
269
+ "This test permits more than one cointegrating relationship, so it is more generally applicable than the Engle–Granger test, which is based on the Dickey–Fuller (or augmented Dickey–Fuller) test for unit roots in the residuals from a single (estimated) cointegrating relationship.\n"
270
+ ]
271
+ },
272
+ {
273
+ "cell_type": "code",
274
+ "execution_count": null,
275
+ "metadata": {
276
+ "id": "sfheMT7Rjnbr"
277
+ },
278
+ "outputs": [],
279
+ "source": [
280
+ "def cointegration_test(df, alpha=0.05):\n",
281
+ " out = coint_johansen(df,-1,5)\n",
282
+ " d = {'0.90':0, '0.95':1, '0.99':2}\n",
283
+ " traces = out.lr1\n",
284
+ " cvts = out.cvt[:, d[str(1-alpha)]]\n",
285
+ " def adjust(val, length= 6): return str(val).ljust(length)\n",
286
+ "\n",
287
+ " # Summary\n",
288
+ " print('Name :: Test Stat > C(95%) => Signif \\n', '--'*20)\n",
289
+ " for col, trace, cvt in zip(df.columns, traces, cvts):\n",
290
+ " print(adjust(col), ':: ', adjust(round(trace,2), 9), \">\", adjust(cvt, 8), ' => ' , trace > cvt)\n",
291
+ "\n",
292
+ "cointegration_test(df)"
293
+ ]
294
+ },
295
+ {
296
+ "cell_type": "markdown",
297
+ "metadata": {
298
+ "id": "HnUIdTkNjnbr"
299
+ },
300
+ "source": [
301
+ "### Train and Test Split\n"
302
+ ]
303
+ },
304
+ {
305
+ "cell_type": "code",
306
+ "execution_count": null,
307
+ "metadata": {
308
+ "id": "7i6WyC9ejnbs"
309
+ },
310
+ "outputs": [],
311
+ "source": [
312
+ "nobs = 10 # number of observations to be forecasted\n",
313
+ "df_train, df_test = df[0:-nobs], df[-nobs:]\n",
314
+ "\n",
315
+ "print(df_train.shape)\n",
316
+ "print(df_test.shape)"
317
+ ]
318
+ },
319
+ {
320
+ "cell_type": "markdown",
321
+ "metadata": {
322
+ "id": "h41rpvQajnbs"
323
+ },
324
+ "source": [
325
+ "### ADF test for stationarity of each series\n",
326
+ "\n",
327
+ "An augmented Dickey–Fuller (ADF) test tests the null hypothesis that a unit root is present in a time series sample.\n",
328
+ "The alternative hypothesis is different depending on which version of the test is used, but is usually stationarity or trend-stationarity.\n",
329
+ "It is an augmented version of the Dickey–Fuller test for a larger and more complicated set of time series models.\n",
330
+ "\n",
331
+ "The augmented Dickey–Fuller (ADF) statistic, used in the test, is a negative number.\n",
332
+ "The more negative it is, the stronger the rejection of the hypothesis that there is a unit root at some level of confidence.\n"
333
+ ]
334
+ },
335
+ {
336
+ "cell_type": "code",
337
+ "execution_count": null,
338
+ "metadata": {
339
+ "id": "wRCwciNGjnbs"
340
+ },
341
+ "outputs": [],
342
+ "source": [
343
+ "def adfuller_test(series,name, signif=0.05, verbose=False):\n",
344
+ " r = adfuller(series, autolag='AIC')\n",
345
+ " output = {'test_statistic':round(r[0], 4), 'pvalue':round(r[1], 4), 'n_lags':round(r[2], 4), 'n_obs':r[3]}\n",
346
+ " p_value = output['pvalue']\n",
347
+ " def adjust(val, length= 6): return str(val).ljust(length)\n",
348
+ "\n",
349
+ " print(f' Augmented Dickey-Fuller Test on \"{name}\"', \"\\n \", '-'*47)\n",
350
+ " print(f' Null Hypothesis: Data has unit root. Non-Stationary.')\n",
351
+ " print(f' Significance Level = {signif}')\n",
352
+ " print(f' Test Statistic = {output[\"test_statistic\"]}')\n",
353
+ " print(f' No. Lags Chosen = {output[\"n_lags\"]}')\n",
354
+ "\n",
355
+ " for key,val in r[4].items():\n",
356
+ " print(f' Critical value {adjust(key)} = {round(val, 3)}')\n",
357
+ "\n",
358
+ " if p_value <= signif:\n",
359
+ " print(f\" => P-Value = {p_value}. Rejecting Null Hypothesis.\")\n",
360
+ " print(f\" => Series is Stationary.\")\n",
361
+ " else:\n",
362
+ " print(f\" => P-Value = {p_value}. Weak evidence to reject the Null Hypothesis.\")\n",
363
+ " print(f\" => Series is Non-Stationary.\")\n",
364
+ "\n",
365
+ "for name, column in df_train.items():\n",
366
+ " adfuller_test(column, name=name)\n",
367
+ " print('\\n')"
368
+ ]
369
+ },
370
+ {
371
+ "cell_type": "markdown",
372
+ "metadata": {
373
+ "id": "I_fi-I9Pjnbs"
374
+ },
375
+ "source": [
376
+ "### Since the series is non stationary we will perform differencing and run the ADF test again\n"
377
+ ]
378
+ },
379
+ {
380
+ "cell_type": "code",
381
+ "execution_count": null,
382
+ "metadata": {
383
+ "id": "2-xfJUfmjnbs"
384
+ },
385
+ "outputs": [],
386
+ "source": [
387
+ "df_differenced = df_train.diff().dropna()"
388
+ ]
389
+ },
390
+ {
391
+ "cell_type": "code",
392
+ "execution_count": null,
393
+ "metadata": {
394
+ "id": "274M3lJAjnbt"
395
+ },
396
+ "outputs": [],
397
+ "source": [
398
+ "for name, column in df_differenced.items():\n",
399
+ " adfuller_test(column, name=name)\n",
400
+ " print('\\n')"
401
+ ]
402
+ },
403
+ {
404
+ "cell_type": "markdown",
405
+ "metadata": {
406
+ "id": "lUtlnD9jjnbt"
407
+ },
408
+ "source": [
409
+ "### Selecting Lag Order (p) for VAR model\n"
410
+ ]
411
+ },
412
+ {
413
+ "cell_type": "code",
414
+ "execution_count": null,
415
+ "metadata": {
416
+ "id": "J1nq05G0jnbt"
417
+ },
418
+ "outputs": [],
419
+ "source": [
420
+ "model = VAR(df_differenced)\n",
421
+ "for i in [1,2,3,4,5,6,7,8,9]:\n",
422
+ " result = model.fit(i)\n",
423
+ " print('Lag Order =', i)\n",
424
+ " print('AIC : ', result.aic)\n",
425
+ " print('BIC : ', result.bic)\n",
426
+ " print('FPE : ', result.fpe)\n",
427
+ " print('HQIC: ', result.hqic, '\\n')\n",
428
+ "\n",
429
+ "x = model.select_order(maxlags=12)\n",
430
+ "x.summary()"
431
+ ]
432
+ },
433
+ {
434
+ "cell_type": "markdown",
435
+ "metadata": {
436
+ "id": "SolEp3_sjnbt"
437
+ },
438
+ "source": [
439
+ "## Model Training\n"
440
+ ]
441
+ },
442
+ {
443
+ "cell_type": "code",
444
+ "execution_count": null,
445
+ "metadata": {
446
+ "id": "5nlbY2WAjnbt"
447
+ },
448
+ "outputs": [],
449
+ "source": [
450
+ "model_fitted = model.fit(5)\n",
451
+ "model_fitted.summary()"
452
+ ]
453
+ },
454
+ {
455
+ "cell_type": "markdown",
456
+ "metadata": {
457
+ "id": "ecIylS0ijnbu"
458
+ },
459
+ "source": [
460
+ "## Durbin Watson Test\n",
461
+ "\n",
462
+ "The Durbin–Watson statistic is a test statistic used to detect the presence of autocorrelation at lag 1 in the residuals (prediction errors) from a regression analysis.\n",
463
+ "It is named after James Durbin and Geoffrey Watson.\n",
464
+ "The small sample distribution of this ratio was derived by John von Neumann (von Neumann, 1941).\n",
465
+ "Durbin and Watson (1950, 1951) applied this statistic to the residuals from least squares regressions, and developed bounds tests for the null hypothesis that the errors are serially uncorrelated against the alternative that they follow a first order autoregressive process.\n",
466
+ "Note that the distribution of this test statistic does not depend on the estimated regression coefficients and the variance of the errors.\n"
467
+ ]
468
+ },
469
+ {
470
+ "cell_type": "code",
471
+ "execution_count": null,
472
+ "metadata": {
473
+ "id": "OjrrFXu_jnbu"
474
+ },
475
+ "outputs": [],
476
+ "source": [
477
+ "out = durbin_watson(model_fitted.resid)\n",
478
+ "\n",
479
+ "for col, val in zip(df.columns, out):\n",
480
+ " print(col, ':', round(val, 2))"
481
+ ]
482
+ },
483
+ {
484
+ "cell_type": "markdown",
485
+ "metadata": {
486
+ "id": "1yoCIdbBjnbu"
487
+ },
488
+ "source": [
489
+ "### Forecasting\n"
490
+ ]
491
+ },
492
+ {
493
+ "cell_type": "code",
494
+ "execution_count": null,
495
+ "metadata": {
496
+ "id": "LPQNznqtjnbu"
497
+ },
498
+ "outputs": [],
499
+ "source": [
500
+ "# Get the lag order\n",
501
+ "lag_order = model_fitted.k_ar\n",
502
+ "print(lag_order)\n",
503
+ "\n",
504
+ "# Input data for forecasting\n",
505
+ "forecast_input = df_differenced.values[-lag_order:]\n",
506
+ "print(forecast_input)\n",
507
+ "\n",
508
+ "fc = model_fitted.forecast(y=forecast_input, steps=nobs)\n",
509
+ "df_forecast = pd.DataFrame(fc, index=df.index[-nobs:], columns=df.columns + '_1d')\n",
510
+ "df_forecast"
511
+ ]
512
+ },
513
+ {
514
+ "cell_type": "markdown",
515
+ "metadata": {
516
+ "id": "Em5XqOHajnbu"
517
+ },
518
+ "source": [
519
+ "## Inversion of differencing\n"
520
+ ]
521
+ },
522
+ {
523
+ "cell_type": "code",
524
+ "execution_count": null,
525
+ "metadata": {
526
+ "id": "0F1Kit38jnbu"
527
+ },
528
+ "outputs": [],
529
+ "source": [
530
+ "def invert_transformation(df_train, df_forecast, second_diff=False):\n",
531
+ " df_fc = df_forecast.copy()\n",
532
+ " columns = df_train.columns\n",
533
+ " for col in columns:\n",
534
+ " # Roll back 2nd Diff\n",
535
+ " if second_diff:\n",
536
+ " df_fc[str(col)+'_1d'] = (df_train[col].iloc[-1]-df_train[col].iloc[-2]) + df_fc[str(col)+'_2d'].cumsum()\n",
537
+ " # Roll back 1st Diff\n",
538
+ " df_fc[str(col)+'_forecast'] = df_train[col].iloc[-1] + df_fc[str(col)+'_1d'].cumsum()\n",
539
+ " return df_fc\n",
540
+ "\n",
541
+ "df_results = invert_transformation(df_train, df_forecast, second_diff=False)\n",
542
+ "df_results.loc[:, ['adjclose_forecast', 'neg_forecast', 'pos_forecast', 'neu_forecast']]"
543
+ ]
544
+ },
545
+ {
546
+ "cell_type": "markdown",
547
+ "metadata": {
548
+ "id": "dlYPfmnPjnbu"
549
+ },
550
+ "source": [
551
+ "## Plot Forcast\n"
552
+ ]
553
+ },
554
+ {
555
+ "cell_type": "code",
556
+ "execution_count": null,
557
+ "metadata": {
558
+ "id": "UemTi70xjnbv"
559
+ },
560
+ "outputs": [],
561
+ "source": [
562
+ "fig, axes = plt.subplots(nrows=len(df.columns), ncols=1, dpi=150, figsize=(10,10))\n",
563
+ "for i, (col,ax) in enumerate(zip(df.columns, axes.flatten())):\n",
564
+ " df_results[col+'_forecast'].plot(legend=True, ax=ax).autoscale(axis='x',tight=True)\n",
565
+ " df_test[col][-nobs:].plot(legend=True, ax=ax)\n",
566
+ " ax.set_title(col + \": Forecast vs Actuals\")\n",
567
+ " ax.xaxis.set_ticks_position('none')\n",
568
+ " ax.yaxis.set_ticks_position('none')\n",
569
+ " ax.spines[\"top\"].set_alpha(0)\n",
570
+ " ax.tick_params(labelsize=6)\n",
571
+ "\n",
572
+ "plt.tight_layout()\n",
573
+ "plt.show()"
574
+ ]
575
+ },
576
+ {
577
+ "cell_type": "markdown",
578
+ "metadata": {
579
+ "id": "xTUG-kBsjnbv"
580
+ },
581
+ "source": [
582
+ "## Error of Forecast\n"
583
+ ]
584
+ },
585
+ {
586
+ "cell_type": "code",
587
+ "execution_count": null,
588
+ "metadata": {
589
+ "id": "VrOa5_hPjnbv"
590
+ },
591
+ "outputs": [],
592
+ "source": [
593
+ "def forecast_accuracy(forecast, actual):\n",
594
+ " mape = np.mean(np.abs(forecast - actual)/np.abs(actual)) # MAPE\n",
595
+ " me = np.mean(forecast - actual) # ME\n",
596
+ " mae = np.mean(np.abs(forecast - actual)) # MAE\n",
597
+ " mpe = np.mean((forecast - actual)/actual) # MPE\n",
598
+ " rmse = np.mean((forecast - actual)**2)**.5 # RMSE\n",
599
+ " corr = np.corrcoef(forecast, actual)[0,1] # corr\n",
600
+ " mins = np.amin(np.hstack([forecast[:,None],\n",
601
+ " actual[:,None]]), axis=1)\n",
602
+ " maxs = np.amax(np.hstack([forecast[:,None],\n",
603
+ " actual[:,None]]), axis=1)\n",
604
+ " minmax = 1 - np.mean(mins/maxs) # minmax\n",
605
+ " return({'mape':mape, 'me':me, 'mae': mae,\n",
606
+ " 'mpe': mpe, 'rmse':rmse, 'corr':corr, 'minmax':minmax})\n",
607
+ "\n",
608
+ "print('Forecast Accuracy of: adjclose')\n",
609
+ "accuracy_prod = forecast_accuracy(df_results['adjclose_forecast'].values, df_test['adjclose'].values)\n",
610
+ "for k, v in accuracy_prod.items():\n",
611
+ " print(k, ': ', round(v,4))\n",
612
+ "\n",
613
+ "print('\\nForecast Accuracy of: pos')\n",
614
+ "accuracy_prod = forecast_accuracy(df_results['pos_forecast'].values, df_test['pos'].values)\n",
615
+ "for k, v in accuracy_prod.items():\n",
616
+ " print(k, ': ', round(v,4))\n",
617
+ "\n",
618
+ "print('\\nForecast Accuracy of: neg')\n",
619
+ "accuracy_prod = forecast_accuracy(df_results['neg_forecast'].values, df_test['neg'].values)\n",
620
+ "for k, v in accuracy_prod.items():\n",
621
+ " print(k, ': ', round(v,4))\n",
622
+ "\n",
623
+ "print('\\nForecast Accuracy of: neu')\n",
624
+ "accuracy_prod = forecast_accuracy(df_results['neu_forecast'].values, df_test['neu'].values)\n",
625
+ "for k, v in accuracy_prod.items():\n",
626
+ " print(k, ': ', round(v,4))"
627
+ ]
628
+ }
629
+ ],
630
+ "metadata": {
631
+ "colab": {
632
+ "provenance": []
633
+ },
634
+ "kernelspec": {
635
+ "display_name": "Python 3",
636
+ "language": "python",
637
+ "name": "python3"
638
+ },
639
+ "language_info": {
640
+ "codemirror_mode": {
641
+ "name": "ipython",
642
+ "version": 3
643
+ },
644
+ "file_extension": ".py",
645
+ "mimetype": "text/x-python",
646
+ "name": "python",
647
+ "nbconvert_exporter": "python",
648
+ "pygments_lexer": "ipython3",
649
+ "version": "3.11.8"
650
+ }
651
+ },
652
+ "nbformat": 4,
653
+ "nbformat_minor": 0
654
+ }
voila.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "Voila": {
3
+ "ip":"0.0.0.0",
4
+ "port": 7860,
5
+ "tornado_settings": {
6
+ "headers": {
7
+ "Content-Security-Policy": "frame-ancestors self *"
8
+ }
9
+ }
10
+ }
11
+ }