CPI VT
r <- CPI_U$r  # monthly CPI-U series extracted from the CPI_U data set
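The decomposition and seasonal ARIMA fits below require r to be a monthly ts object. If CPI_U is a plain data frame, a conversion along these lines is presumed (the start date here is a placeholder, not taken from the original):

r <- ts(CPI_U$r, start = c(1990, 1), frequency = 12)  # assumed: start date is a placeholder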
library(carData)
library(zoo)
library(foreign)
library(dynlm)
library(sandwich)
library(car)
library(ggplot2)
library(forecast)
library(tseries)
library(urca)
summary(r)
plot(r)
r.ts <- decompose(r, type = "additive")
autoplot(r.ts)
r.ts1 <- decompose(r, type = "multiplicative")
autoplot(r.ts1)
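Both decompositions are plotted for comparison: if the seasonal swings in r stay roughly constant as the level changes, the additive form is the more natural description; if they scale with the level, the multiplicative form is.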
Box.test(r)
##
## Box-Pierce test
##
## data: r
## X-squared = 1.3714, df = 1, p-value = 0.2416
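With a p-value of 0.2416, the Box-Pierce test does not reject independence at lag 1, so the raw series shows no significant first-order autocorrelation.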
adf.test(r)
##
## Augmented Dickey-Fuller Test
##
## data: r
## Dickey-Fuller = -11.482, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary
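tseries::adf.test caps the reported p-value at 0.01, so the test rejects the unit-root null at the 1% level and treats r as stationary.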
acf(r)
pacf(r)
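Four seasonal ARIMA specifications are fit on a training window ending December 2023 and compared via summary(). The calls are reconstructed from the Call: lines in the output below; the object names reg1-reg4 are an assumption (only reg1 is referenced in the later diagnostics).

# Reconstructed from the Call: line below; the object name reg1 is assumed.
reg1 <- arima((window(r, end = c(2023, 12))), order = c(1, 1, 1),
              seasonal = list(order = c(1, 1, 1), period = 12),
              method = c("CSS-ML", "ML", "CSS"),
              optim.control = list(maxit = 1000))
summary(reg1)  # forecast's summary method adds the training-set error measures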
##
## Call:
## arima(x = (window(r, end = c(2023, 12))), order = c(1, 1, 1), seasonal = list(order = c(1,
##     1, 1), period = 12), method = c("CSS-ML", "ML", "CSS"), optim.control = list(maxit = 1000))
##
## Coefficients:
##           ar1      ma1    sar1     sma1
##       -0.2962  -1.0000  0.2027  -0.9216
## s.e.   0.0530   0.0121  0.0657   0.0456
##
## sigma^2 estimated as 0.003386:  log likelihood = 453.4,  aic = -896.8
##
## Training set error measures:
##                        ME       RMSE        MAE      MPE     MAPE      MASE        ACF1
## Training set -0.001008867 0.05707262 0.04176693 9.931916 184.0122 0.4199934 -0.07742582
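The second specification, ARIMA(1,1,2)(1,1,1)[12], reconstructed the same way (object name assumed):

reg2 <- arima((window(r, end = c(2023, 12))), order = c(1, 1, 2),
              seasonal = list(order = c(1, 1, 1), period = 12),
              method = c("CSS-ML", "ML", "CSS"),
              optim.control = list(maxit = 1000))
summary(reg2)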
##
## Call:
## arima(x = (window(r, end = c(2023, 12))), order = c(1, 1, 2), seasonal = list(order = c(1,
##     1, 1), period = 12), method = c("CSS-ML", "ML", "CSS"), optim.control = list(maxit = 1000))
##
## Coefficients:
##          ar1      ma1     ma2    sar1     sma1
##       0.2136  -1.6121  0.6121  0.2111  -0.9504
## s.e.  0.1101   0.0895  0.0885  0.0650   0.0588
##
## sigma^2 estimated as 0.003133:  log likelihood = 463.33,  aic = -914.66
##
## Training set error measures:
##                         ME       RMSE        MAE      MPE     MAPE      MASE        ACF1
## Training set -0.0005491916 0.05489814 0.04009895 8.325093 181.8746 0.4032208 0.002681089
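The third specification, ARIMA(2,1,2)(1,1,1)[12] (object name assumed):

reg3 <- arima((window(r, end = c(2023, 12))), order = c(2, 1, 2),
              seasonal = list(order = c(1, 1, 1), period = 12),
              method = c("CSS-ML", "ML", "CSS"),
              optim.control = list(maxit = 1000))
summary(reg3)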
##
## Call:
## arima(x = (window(r, end = c(2023, 12))), order = c(2, 1, 2), seasonal = list(order = c(1,
##     1, 1), period = 12), method = c("CSS-ML", "ML", "CSS"), optim.control = list(maxit = 1000))
##
## Coefficients:
##          ar1      ar2      ma1     ma2    sar1     sma1
##       0.0760  -0.0952  -1.4672  0.4672  0.2055  -0.9553
## s.e.  0.2668   0.1196   0.2684  0.2680  0.0660   0.0638
##
## sigma^2 estimated as 0.003115:  log likelihood = 463.76,  aic = -913.53
##
## Training set error measures:
##                         ME       RMSE       MAE      MPE     MAPE      MASE        ACF1
## Training set -0.0006654801 0.05474198 0.0398714 8.575361 180.2003 0.4009326 0.002218171
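The fourth specification, ARIMA(2,1,1)(1,1,1)[12] (object name assumed):

reg4 <- arima((window(r, end = c(2023, 12))), order = c(2, 1, 1),
              seasonal = list(order = c(1, 1, 1), period = 12),
              method = c("CSS-ML", "ML", "CSS"),
              optim.control = list(maxit = 1000))
summary(reg4)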
##
## Call:
## arima(x = (window(r, end = c(2023, 12))), order = c(2, 1, 1), seasonal = list(order = c(1,
##     1, 1), period = 12), method = c("CSS-ML", "ML", "CSS"), optim.control = list(maxit = 1000))
##
## Coefficients:
##           ar1      ar2      ma1    sar1     sma1
##       -0.3713  -0.2506  -1.0000  0.1851  -0.9413
## s.e.   0.0540   0.0539   0.0139  0.0649   0.0521
##
## sigma^2 estimated as 0.003138:  log likelihood = 463.84,  aic = -915.68
##
## Training set error measures:
##                         ME       RMSE        MAE     MPE     MAPE      MASE         ACF1
## Training set -0.0008899748 0.05493885 0.03989064 12.3036 179.5279 0.4011261 0.0003392887
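Of the four fits, ARIMA(2,1,1)(1,1,1)[12] posts the lowest AIC (-915.68), narrowly beating ARIMA(1,1,2)(1,1,1)[12] (-914.66); ARIMA(2,1,2) gives -913.53 and ARIMA(1,1,1) trails at -896.8. The residual diagnostics that follow are run on the object named reg1.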
acf(reg1$residuals)
pacf(reg1$residuals)
adf.test(reg1$residuals)
##
## Augmented Dickey-Fuller Test
##
## data: reg1$residuals
## Dickey-Fuller = -8.5031, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary
Box.test(reg1$residuals)
##
## Box-Pierce test
##
## data: reg1$residuals
## X-squared = 2.0442, df = 1, p-value = 0.1528
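The residual diagnostics look clean: the ADF test rejects a unit root (p <= 0.01) and the Box-Pierce test (p = 0.1528) finds no significant lag-1 autocorrelation, so the model appears to have absorbed the linear structure in the series.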
autoplot(reg1)
plot(forecast(reg1, h = 60))  # 60-month-ahead forecast with prediction intervals
lines(r)                      # overlay the full observed series
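Because the model is trained on data through December 2023 while the lines(r) overlay shows the series continuing past that point, the 60-month forecast can also be scored out of sample. A minimal sketch, assuming observations from January 2024 onward are present in r:

accuracy(forecast(reg1, h = 60), window(r, start = c(2024, 1)))  # test-set error measures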