Resource Overview
An implementation of the conversion to Karmarkar standard form and of the projective scaling algorithm, as used in an advanced mathematical programming course. It prints the objective value and the Karmarkar objective value at each iteration and produces a chart of the iteration history. Note: the input is the parameter set of an LP in general standard form.
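For orientation, here is the standard textbook setting the snippet below works in (my notation, summarising the code rather than quoting the original author). The input is assumed to be a standard-form LP,

\min\ c^{T}x \quad \text{s.t.}\quad Ax=b,\ x\ge 0, \qquad A \in \mathbb{R}^{m\times n},

and the transformation stacks the primal constraints, the dual feasibility conditions (with the dual variable split into a positive and a negative part plus slacks), and the strong-duality equation c^{T}x = b^{T}y into one homogeneous system, then appends an artificial column so that the simplex centre is feasible. The result is Karmarkar's canonical form,

\min\ \bar{c}^{T}z \quad \text{s.t.}\quad \bar{B}z=0,\ e^{T}z=1,\ z\ge 0,

whose optimal value is 0 and whose centre z_{0}=e/N is a feasible starting point; the projective scaling iteration is then run on this form.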
Code Snippet and File Information
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date   : 2019-10-11 17:21:42
# @Author : ${QU JINMING}
# @Email  : ${qjming97@163.com}
import os
import math
import sklearn
import numpy as np
import csv
import matplotlib.pyplot as plt
def trans_to_karmarkar(a, b, c, m, n):
    # constraint matrix A, right-hand side b (column vector), objective coefficients c (column vector),
    # number of constraints m, number of variables n
    at = np.transpose(a)
    ct = np.transpose(c)
    bt = np.transpose(b)
    fu_b = -1 * b
    fu_c = -1 * c
    fu_at = -1 * at
    fu_bt = -1 * bt
    i_n = np.identity(n)
    zero_1 = np.zeros((m, 2*m+n))  # zero block of the first block row
    zero_2 = np.zeros((n, n))      # zero block of the second block row
    zero_3 = np.zeros((1, n))      # zero block of the third block row
    # print(zero_3)
    zero_4 = np.array([[0]])
    e_total = np.ones(2*m+2*n)
    e_last = np.ones((1, 2*m+2*n+2))
    # print(b.shape, c.shape, zero_3.shape)
    b_gang = np.concatenate((b, c, zero_4), axis=0)
    # print(b_gang.shape)
    fu_b_bang = -1 * b_gang
    # build each block row by horizontal concatenation, then stack the rows vertically
    A_plus1 = np.concatenate((a, zero_1), axis=1)
    # print(A_plus1.shape)
    A_plus2 = np.concatenate((zero_2, at, fu_at, i_n), axis=1)
    # print(A_plus2.shape)
    A_plus3 = np.concatenate((ct, fu_bt, bt, zero_3), axis=1)
    # print(A_plus3.shape)
    A_plus = np.concatenate((A_plus1, A_plus2, A_plus3), axis=0)
    # print(A_plus.shape)
    B_ae = b_gang - np.reshape(np.dot(A_plus, e_total), (m+n+1, 1))
    # print(B_ae.shape)
    B_1 = np.concatenate((A_plus, B_ae, fu_b_bang), axis=1)
    # print(B_1.shape)
    B = np.concatenate((B_1, e_last), axis=0)
    c_aim_1 = np.zeros((2*m+2*n, 1))
    temp = np.array([[1], [0]])
    c_aim = np.concatenate((c_aim_1, temp), axis=0)
    return B, c_aim
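# NOTE: karmarkar_one_step() is called by karmarkar() below but is not included in this
# snippet. The following is a minimal sketch of one textbook projective-scaling update,
# reconstructed only to match the call signature karmarkar_one_step(X_0, n, A, c_aim,
# alpha, r); it is an assumption, not the author's original routine.
def karmarkar_one_step(x, n, A, c, alpha, r):
    D = np.diagflat(x)                        # diagonal scaling matrix diag(x)
    P = np.concatenate((np.dot(A, D), np.ones((1, n))), axis=0)  # [A*D; e^T]
    Dc = np.dot(D, c)                         # objective vector in the scaled space
    # project D*c onto the null space of P (pinv guards against rank deficiency)
    PPt_inv = np.linalg.pinv(np.dot(P, np.transpose(P)))
    cp = Dc - np.dot(np.transpose(P), np.dot(PPt_inv, np.dot(P, Dc)))
    norm_cp = np.linalg.norm(cp)
    if norm_cp == 0:                          # projected gradient vanished: stay put
        return x
    # step from the simplex centre e/n along -cp, inside the inscribed ball of radius r
    y = np.ones((n, 1)) / n - alpha * r * cp / norm_cp
    x_new = np.dot(D, y)
    return x_new / np.sum(x_new)              # projective map back onto the simplex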
def karmarkar(B, c_aim):
    # The quality of the approximate solution depends heavily on the parameter L
    # and on the step length alpha.
    # The stopping test can use the L-parameter rule from the textbook, or compare
    # the relative change of the objective value between iterations.
    # When tuning the two parameters, alpha acts like a learning rate and should be small.
    # For L: the smaller L is (i.e. the larger its absolute value), the stricter the
    # stopping test and the easier it is for the step to go out of bounds.
    # Likewise, the smaller the change-ratio threshold, the easier it is to overshoot,
    # but the closer the result gets to the optimal objective value.
    m = len(B)
    n = len(B[1])
    A = B[:m-1]
    # print("input A", A)
    r = 1 / math.sqrt(n*(n-1))
    # print('r', r)
    # print(A)
    L = -22
    alpha = 1/32
    X_0 = np.ones((n, 1)) / n
    # print("x0", X_0)
    # X_0 = np.array([[0.6220], [1/3], [0.0447]])
    Y_ORIGINAL = np.dot(np.transpose(c_aim), X_0)
    # print("y00", Y_ORIGINAL)
    FLAG = Y_ORIGINAL * math.pow(2, L)  # threshold used by the textbook stopping rule
    # print("FLAG", FLAG)
    Y_each = []
    X_each = []
    print("-------------- iteration start ---------------\n")
    while 1:
        X_1 = karmarkar_one_step(X_0, n, A, c_aim, alpha, r)
        Y_0 = np.dot(np.transpose(c_aim), X_0)
        Y_1 = np.dot(np.transpose(c_aim), X_1)
        assert Y_1  # Y_1 must be nonzero for the ratio test below
        X_0 = X_1
        Y_each.append(Y_1)
        X_each.append(X_1)
        print("current objective value:", Y_1)
        bias = (Y_0 - Y_1) / Y_1
        if bias < 0.0014:  # stop once the relative decrease of the objective is small enough
            print("-------------- iteration end ---------------")
            print("approximate optimal variables\n", X_1, "\napproximate optimal Karmarkar objective\n", Y_1)
            break
        else:
            continue
    X_LAST = X_1
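
For reference, a minimal driver might look like the sketch below. The toy data (one equality constraint in two variables) and the __main__ block are made up for illustration, assuming the input LP is in the standard form min c^T x s.t. Ax = b, x >= 0 that trans_to_karmarkar expects; how quickly karmarkar() stops depends on the alpha, L, and threshold choices discussed in the comments above.

if __name__ == '__main__':
    # hypothetical toy problem: minimise -x1 - 2*x2 subject to x1 + x2 = 2, x >= 0
    a = np.array([[1.0, 1.0]])      # m x n constraint matrix
    b = np.array([[2.0]])           # m x 1 right-hand side
    c = np.array([[-1.0], [-2.0]])  # n x 1 objective coefficients
    m, n = a.shape
    B, c_aim = trans_to_karmarkar(a, b, c, m, n)
    print(B.shape, c_aim.shape)     # expected: (m+n+2, 2*m+2*n+2) and (2*m+2*n+2, 1)
    karmarkar(B, c_aim)             # runs the projective scaling iteration and prints progress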