Probability distributions metrics
commit ef84d50346
parent 47b1cb57c9
@ -43,6 +43,16 @@ class ProbabilityDistribution(object):
                   for k in self.bins])
        return h

+    def crossentropy(self,q):
+        h = -sum([self.distribution[k] * np.log(q.distribution[k]) if self.distribution[k] > 0 else 0
+                  for k in self.bins])
+        return h
+
+    def kullbackleiblerdivergence(self,q):
+        h = sum([self.distribution[k] * np.log(self.distribution[k]/q.distribution[k]) if self.distribution[k] > 0 else 0
+                 for k in self.bins])
+        return h
+
     def empiricalloglikelihood(self):
         _s = 0
         for k in self.bins:
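
For reference, both new metrics compare this distribution p against another distribution q defined over the same bins: cross-entropy is H(p, q) = -sum_k p(k) * log q(k) and the Kullback-Leibler divergence is D_KL(p || q) = sum_k p(k) * log(p(k) / q(k)), with terms where p(k) = 0 taken as 0. A minimal standalone sketch of the same computation (the bins, p and q values below are illustrative, not part of pyFTS):

import numpy as np

# Two hypothetical probability mass functions over the same bins.
bins = [0, 1, 2]
p = {0: 0.5, 1: 0.3, 2: 0.2}
q = {0: 0.4, 1: 0.4, 2: 0.2}

# Cross-entropy H(p, q): expected -log q under p, skipping zero-probability bins.
crossentropy = -sum(p[k] * np.log(q[k]) if p[k] > 0 else 0 for k in bins)

# Kullback-Leibler divergence D_KL(p || q): non-negative, zero only when p == q.
kl = sum(p[k] * np.log(p[k] / q[k]) if p[k] > 0 else 0 for k in bins)

print(crossentropy, kl)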
@ -60,6 +70,16 @@ class ProbabilityDistribution(object):
                 _s += np.log(k)
        return _s

+    def averageloglikelihood(self, data):
+
+        densities = self.density(data)
+
+        _s = 0
+        for k in densities:
+            if k > 0:
+                _s += np.log(k)
+        return _s / len(data)
+
     def plot(self,axis=None,color="black",tam=[10, 6]):
         if axis is None:
             fig = plt.figure(figsize=tam)
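
The new averageloglikelihood method scores how well the fitted distribution explains a sample: it averages the log of the density at each data point, skipping points that fall in zero-density bins. A minimal sketch of the same computation outside the class, assuming density() returns one density value per data point (the densities list below is illustrative):

import numpy as np

# Hypothetical densities as returned by self.density(data) for a five-point sample.
densities = [0.12, 0.08, 0.0, 0.25, 0.05]

# Average log-likelihood: mean log density over the sample, skipping zero densities
# exactly as the method above does; higher values indicate a better fit.
_s = sum(np.log(k) for k in densities if k > 0)
print(_s / len(densities))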
@ -119,7 +119,7 @@ def getPointStatistics(data, models, externalmodels = None, externalforecasts =
             ret += " 1 & "
             ret += str(round(Measures.rmse(data, externalforecasts[k][:-1]), 2)) + " & "
             ret += str(round(Measures.smape(data, externalforecasts[k][:-1]), 2))+ " & "
-            ret += str(round(Measures.UStatistic(np.array(data), np.array(forecasts[:-1])), 2))
+            ret += str(round(Measures.UStatistic(data, externalforecasts[k][:-1]), 2))
             ret += " \\\\ \n"
     return ret

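The corrected line feeds the k-th external model's own forecasts to Measures.UStatistic instead of the unrelated forecasts array, and drops the redundant np.array conversion. As context, Theil's U statistic compares a model's one-step-ahead errors against the naive no-change forecast; a hedged sketch under the standard U2 definition (pyFTS's exact implementation may differ):

import numpy as np

def theil_u(data, forecasts):
    # Theil's U2: relative one-step errors of the model divided by those of the
    # naive forecast y_hat[t+1] = y[t]; values below 1 mean the model beats naive.
    data, forecasts = np.array(data, dtype=float), np.array(forecasts, dtype=float)
    num = np.sum(((forecasts[1:] - data[1:]) / data[:-1]) ** 2)
    den = np.sum(((data[1:] - data[:-1]) / data[:-1]) ** 2)
    return np.sqrt(num / den)

# Illustrative series and one-step forecasts (forecasts[t] predicts data[t]).
print(theil_u([10, 11, 12, 13], [10, 10.5, 11.8, 12.9]))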
@ -47,9 +47,21 @@ from pyFTS.benchmarks import ProbabilityDistribution as dist

 forecasts = pfts1.forecast(taiex_treino)

-pmf1 = dist.ProbabilityDistribution("Original",10,[min(taiex_treino),max(taiex_treino)],data=forecasts)
+pmf1 = dist.ProbabilityDistribution("Original",100,[min(taiex_treino),max(taiex_treino)],data=taiex_treino)

-print(pmf1)
+#print(pmf1.entropy())
+
+pmf2 = dist.ProbabilityDistribution("Original",100,[min(taiex_treino),max(taiex_treino)],data=forecasts)
+
+#print(pmf2.entropy())
+
+#print(pmf2.kullbackleiblerdivergence(pmf1))
+
+#print(pmf2.crossentropy(pmf1))
+
+print(pmf1.averageloglikelihood(taiex_treino))
+
+print(pmf2.averageloglikelihood(taiex_treino))

 #pfts2 = pfts.ProbabilisticFTS("n = 2")
 #pfts2.appendTransformation(diff)