diff --git a/benchmarks/ProbabilityDistribution.py b/benchmarks/ProbabilityDistribution.py
index 199e8f1..29bdad0 100644
--- a/benchmarks/ProbabilityDistribution.py
+++ b/benchmarks/ProbabilityDistribution.py
@@ -43,6 +43,16 @@ class ProbabilityDistribution(object):
                   for k in self.bins])
         return h
 
+    def crossentropy(self,q):
+        h = -sum([self.distribution[k] * np.log(q.distribution[k]) if self.distribution[k] > 0 else 0
+                  for k in self.bins])
+        return h
+
+    def kullbackleiblerdivergence(self,q):
+        h = sum([self.distribution[k] * np.log(self.distribution[k]/q.distribution[k]) if self.distribution[k] > 0 else 0
+                  for k in self.bins])
+        return h
+
     def empiricalloglikelihood(self):
         _s = 0
         for k in self.bins:
@@ -60,6 +70,16 @@ class ProbabilityDistribution(object):
             _s += np.log(k)
         return _s
 
+    def averageloglikelihood(self, data):
+
+        densities = self.density(data)
+
+        _s = 0
+        for k in densities:
+            if k > 0:
+                _s += np.log(k)
+        return _s / len(data)
+
     def plot(self,axis=None,color="black",tam=[10, 6]):
         if axis is None:
             fig = plt.figure(figsize=tam)
diff --git a/benchmarks/benchmarks.py b/benchmarks/benchmarks.py
index b056c21..36729eb 100644
--- a/benchmarks/benchmarks.py
+++ b/benchmarks/benchmarks.py
@@ -119,7 +119,7 @@ def getPointStatistics(data, models, externalmodels = None, externalforecasts =
             ret += " 1 & "
             ret += str(round(Measures.rmse(data, externalforecasts[k][:-1]), 2)) + " & "
             ret += str(round(Measures.smape(data, externalforecasts[k][:-1]), 2))+ " & "
-            ret += str(round(Measures.UStatistic(np.array(data), np.array(forecasts[:-1])), 2))
+            ret += str(round(Measures.UStatistic(data, externalforecasts[k][:-1]), 2))
             ret += " \\\\ \n"
 
     return ret
diff --git a/tests/pfts.py b/tests/pfts.py
index 05e5807..1321e11 100644
--- a/tests/pfts.py
+++ b/tests/pfts.py
@@ -47,9 +47,21 @@ from pyFTS.benchmarks import ProbabilityDistribution as dist
 
 forecasts = pfts1.forecast(taiex_treino)
 
-pmf1 = dist.ProbabilityDistribution("Original",10,[min(taiex_treino),max(taiex_treino)],data=forecasts)
+pmf1 = dist.ProbabilityDistribution("Original",100,[min(taiex_treino),max(taiex_treino)],data=taiex_treino)
 
-print(pmf1)
+#print(pmf1.entropy())
+
+pmf2 = dist.ProbabilityDistribution("Original",100,[min(taiex_treino),max(taiex_treino)],data=forecasts)
+
+#print(pmf2.entropy())
+
+#print(pmf2.kullbackleiblerdivergence(pmf1))
+
+#print(pmf2.crossentropy(pmf1))
+
+print(pmf1.averageloglikelihood(taiex_treino))
+
+print(pmf2.averageloglikelihood(taiex_treino))
 
 #pfts2 = pfts.ProbabilisticFTS("n = 2")
 #pfts2.appendTransformation(diff)
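
Commentary (not part of the patch): the new measures on ProbabilityDistribution are the standard information-theoretic quantities over the shared bins. With the receiver playing the role of p and the argument the role of q, crossentropy computes H(p, q) = -sum_k p(k) * log q(k) and kullbackleiblerdivergence computes D_KL(p || q) = sum_k p(k) * log(p(k) / q(k)); the `self.distribution[k] > 0` guard drops the p(k) = 0 terms, though note a zero bin in q still yields -inf. averageloglikelihood is the empirical counterpart, (1/N) * sum_i log density(x_i). A minimal sketch of how the quantities relate, on toy numpy arrays that are made up for illustration and independent of the pyFTS classes:

# Illustrative sketch (not from the patch): the quantities the new methods
# compute, checked on two toy discrete distributions. All names and values
# here are hypothetical.
import numpy as np

p = np.array([0.5, 0.3, 0.2])   # reference pmf (plays the role of self.distribution)
q = np.array([0.4, 0.4, 0.2])   # comparison pmf (plays the role of q.distribution)

entropy = -np.sum(p * np.log(p))            # H(p), as in the existing entropy()
cross_entropy = -np.sum(p * np.log(q))      # H(p, q), as in crossentropy()
kl_divergence = np.sum(p * np.log(p / q))   # D_KL(p || q), as in kullbackleiblerdivergence()

# The three measures are tied together by H(p, q) = H(p) + D_KL(p || q):
assert np.isclose(cross_entropy, entropy + kl_divergence)

# averageloglikelihood() is the empirical counterpart:
# (1/N) * sum(log density(x_i)) over the observed sample.
sample = np.array([0, 0, 1, 2, 0, 1])       # hypothetical bin indices of a sample
avg_loglik = np.mean(np.log(q[sample]))
print(entropy, cross_entropy, kl_divergence, avg_loglik)

Under that convention, in the updated tests/pfts.py, pmf2.crossentropy(pmf1) and pmf2.kullbackleiblerdivergence(pmf1) compare the forecast pmf (receiver, p) against the training-data pmf (argument, q), and the two averageloglikelihood calls score both pmfs against the same training sample.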