您好,登錄后才能下訂單哦!
小編給大家分享一下python如何實現隱馬爾科夫模型HMM,相信大部分人都還不怎么了解,因此分享這篇文章給大家參考一下,希望大家閱讀完這篇文章后大有收獲,下面讓我們一起去了解一下吧!
具體內容如下
#coding=utf8 ''''' Created on 2017-8-5 里面的代碼許多地方可以精簡,但為了百分百還原公式,就沒有精簡了。 @author: adzhua ''' import numpy as np class HMM(object): def __init__(self, A, B, pi): ''''' A: 狀態轉移概率矩陣 B: 輸出觀察概率矩陣 pi: 初始化狀態向量 ''' self.A = np.array(A) self.B = np.array(B) self.pi = np.array(pi) self.N = self.A.shape[0] # 總共狀態個數 self.M = self.B.shape[1] # 總共觀察值個數 # 輸出HMM的參數信息 def printHMM(self): print ("==================================================") print ("HMM content: N =",self.N,",M =",self.M) for i in range(self.N): if i==0: print ("hmm.A ",self.A[i,:]," hmm.B ",self.B[i,:]) else: print (" ",self.A[i,:]," ",self.B[i,:]) print ("hmm.pi",self.pi) print ("==================================================") # 前向算法 def forwar(self, T, O, alpha, prob): ''''' T: 觀察序列的長度 O: 觀察序列 alpha: 運算中用到的臨時數組 prob: 返回值所要求的概率 ''' # 初始化 for i in range(self.N): alpha[0, i] = self.pi[i] * self.B[i, O[0]] # 遞歸 for t in range(T-1): for j in range(self.N): sum = 0.0 for i in range(self.N): sum += alpha[t, i] * self.A[i, j] alpha[t+1, j] = sum * self.B[j, O[t+1]] # 終止 sum = 0.0 for i in range(self.N): sum += alpha[T-1, i] prob[0] *= sum # 帶修正的前向算法 def forwardWithScale(self, T, O, alpha, scale, prob): scale[0] = 0.0 # 初始化 for i in range(self.N): alpha[0, i] = self.pi[i] * self.B[i, O[0]] scale[0] += alpha[0, i] for i in range(self.N): alpha[0, i] /= scale[0] # 遞歸 for t in range(T-1): scale[t+1] = 0.0 for j in range(self.N): sum = 0.0 for i in range(self.N): sum += alpha[t, i] * self.A[i, j] alpha[t+1, j] = sum * self.B[j, O[t+1]] scale[t+1] += alpha[t+1, j] for j in range(self.N): alpha[t+1, j] /= scale[t+1] # 終止 for t in range(T): prob[0] += np.log(scale[t]) def back(self, T, O, beta, prob): ''''' T: 觀察序列的長度 len(O) O: 觀察序列 beta: 計算時用到的臨時數組 prob: 返回值;所要求的概率 ''' # 初始化 for i in range(self.N): beta[T-1, i] = 1.0 # 遞歸 for t in range(T-2, -1, -1): # 從T-2開始遞減;即T-2, T-3, T-4, ..., 0 for i in range(self.N): sum = 0.0 for j in range(self.N): sum += self.A[i, j] * self.B[j, O[t+1]] * beta[t+1, j] beta[t, i] = sum # 終止 
sum = 0.0 for i in range(self.N): sum += self.pi[i]*self.B[i,O[0]]*beta[0,i] prob[0] = sum # 帶修正的后向算法 def backwardWithScale(self, T, O, beta, scale): ''''' T: 觀察序列的長度 len(O) O: 觀察序列 beta: 計算時用到的臨時數組 ''' # 初始化 for i in range(self.N): beta[T-1, i] = 1.0 # 遞歸 for t in range(T-2, -1, -1): for i in range(self.N): sum = 0.0 for j in range(self.N): sum += self.A[i, j] * self.B[j, O[t+1]] * beta[t+1, j] beta[t, i] = sum / scale[t+1] # viterbi算法 def viterbi(self, O): ''''' O: 觀察序列 ''' T = len(O) # 初始化 delta = np.zeros((T, self.N), np.float) phi = np.zeros((T, self.N), np.float) I = np.zeros(T) for i in range(self.N): delta[0, i] = self.pi[i] * self.B[i, O[0]] phi[0, i] = 0.0 # 遞歸 for t in range(1, T): for i in range(self.N): delta[t, i] = self.B[i, O[t]] * np.array([delta[t-1, j] * self.A[j, i] for j in range(self.N)] ).max() phi = np.array([delta[t-1, j] * self.A[j, i] for j in range(self.N)]).argmax() # 終止 prob = delta[T-1, :].max() I[T-1] = delta[T-1, :].argmax() for t in range(T-2, -1, -1): I[t] = phi[I[t+1]] return prob, I # 計算gamma(計算A所需的分母;詳情見李航的統計學習) : 時刻t時馬爾可夫鏈處于狀態Si的概率 def computeGamma(self, T, alpha, beta, gamma): '''''''' for t in range(T): for i in range(self.N): sum = 0.0 for j in range(self.N): sum += alpha[t, j] * beta[t, j] gamma[t, i] = (alpha[t, i] * beta[t, i]) / sum # 計算sai(i,j)(計算A所需的分子) 為給定訓練序列O和模型lambda時 def computeXi(self, T, O, alpha, beta, Xi): for t in range(T-1): sum = 0.0 for i in range(self.N): for j in range(self.N): Xi[t, i, j] = alpha[t, i] * self.A[i, j] * self.B[j, O[t+1]] * beta[t+1, j] sum += Xi[t, i, j] for i in range(self.N): for j in range(self.N): Xi[t, i, j] /= sum # 輸入 L個觀察序列O,初始模型:HMM={A,B,pi,N,M} def BaumWelch(self, L, T, O, alpha, beta, gamma): DELTA = 0.01 ; round = 0 ; flag = 1 ; probf = [0.0] delta = 0.0; probprev = 0.0 ; ratio = 0.0 ; deltaprev = 10e-70 xi = np.zeros((T, self.N, self.N)) # 計算A的分子 pi = np.zeros((T), np.float) # 狀態初始化概率 denominatorA = np.zeros((self.N), np.float) # 輔助計算A的分母的變量 denominatorB = 
np.zeros((self.N), np.float) numeratorA = np.zeros((self.N, self.N), np.float) # 輔助計算A的分子的變量 numeratorB = np.zeros((self.N, self.M), np.float) # 針對輸出觀察概率矩陣 scale = np.zeros((T), np.float) while True: probf[0] =0 # E_step for l in range(L): self.forwardWithScale(T, O[l], alpha, scale, probf) self.backwardWithScale(T, O[l], beta, scale) self.computeGamma(T, alpha, beta, gamma) # (t, i) self.computeXi(T, O[l], alpha, beta, xi) #(t, i, j) for i in range(self.N): pi[i] += gamma[0, i] for t in range(T-1): denominatorA[i] += gamma[t, i] denominatorB[i] += gamma[t, i] denominatorB[i] += gamma[T-1, i] for j in range(self.N): for t in range(T-1): numeratorA[i, j] += xi[t, i, j] for k in range(self.M): # M為觀察狀態取值個數 for t in range(T): if O[l][t] == k: numeratorB[i, k] += gamma[t, i] # M_step。 計算pi, A, B for i in range(self.N): # 這個for循環也可以放到for l in range(L)里面 self.pi[i] = 0.001 / self.N + 0.999 * pi[i] / L for j in range(self.N): self.A[i, j] = 0.001 / self.N + 0.999 * numeratorA[i, j] / denominatorA[i] numeratorA[i, j] = 0.0 for k in range(self.M): self.B[i, k] = 0.001 / self.N + 0.999 * numeratorB[i, k] / denominatorB[i] numeratorB[i, k] = 0.0 #重置 pi[i] = denominatorA[i] = denominatorB[i] = 0.0 if flag == 1: flag = 0 probprev = probf[0] ratio = 1 continue delta = probf[0] - probprev ratio = delta / deltaprev probprev = probf[0] deltaprev = delta round += 1 if ratio <= DELTA : print('num iteration: ', round) break if __name__ == '__main__': print ("python my HMM") # 初始的狀態概率矩陣pi;狀態轉移矩陣A;輸出觀察概率矩陣B; 觀察序列 pi = [0.5,0.5] A = [[0.8125,0.1875],[0.2,0.8]] B = [[0.875,0.125],[0.25,0.75]] O = [ [1,0,0,1,1,0,0,0,0], [1,1,0,1,0,0,1,1,0], [0,0,1,1,0,0,1,1,1] ] L = len(O) T = len(O[0]) # T等于最長序列的長度就好了 hmm = HMM(A, B, pi) alpha = np.zeros((T,hmm.N),np.float) beta = np.zeros((T,hmm.N),np.float) gamma = np.zeros((T,hmm.N),np.float) # 訓練 hmm.BaumWelch(L,T,O,alpha,beta,gamma) # 輸出HMM參數信息 hmm.printHMM()
以上是“python如何實現隱馬爾科夫模型HMM”這篇文章的所有內容,感謝各位的閱讀!相信大家都有了一定的了解,希望分享的內容對大家有所幫助,如果還想學習更多知識,歡迎關注億速云行業資訊頻道!
免責聲明:本站發布的內容(圖片、視頻和文字)以原創、轉載和分享為主,文章觀點不代表本網站立場,如果涉及侵權請聯系站長郵箱:is@yisu.com進行舉報,并提供相關證據,一經查實,將立刻刪除涉嫌侵權內容。