資源簡介
剪枝壓縮:透過 L1 範數篩選並移除 Caffe 模型中權重較小的卷積核,以壓縮模型。
代碼片段和文件信息
# coding: utf-8
# by chen yh
import caffe
import numpy as np
import shutil
import matplotlib.pyplot as plt
'''
These parameters need modification:
    root: root directory;
    model: your caffemodel;
    prototxt: your prototxt;
    prune layer: names of the layers to prune (a list);
    input layer: layers whose input is the output of a prune layer (each element is a list);
    th: threshold of each prune layer (a list).
Please ensure prune layer, input layer and th have the same length.

picture and get_sum_l1 functions can help you find a suitable threshold.
'''
def get_prune(net, layer, threshold):
    """Return the indices of the convolution kernels in `layer` whose
    L1 norm (sum of absolute weights) is ABOVE `threshold`, i.e. the
    kernels that are kept; the rest are candidates for pruning.

    Args:
        net: a caffe.Net-like object; `net.params[layer][0].data` is the
            4-D weight blob (out_channels, in_channels, kh, kw).
            # NOTE(review): bias (params[layer][1]) is read by prune(), not here.
        layer: name of the convolution layer to inspect.
        threshold: L1-norm cutoff; kernels at or below it are dropped.

    Returns:
        list[int]: indices of kernels to keep, in ascending order.
    """
    weight_ori = net.params[layer][0].data
    # (kernel index, sum of |weights| of that kernel) for every output channel
    sum_l1 = []
    for i in range(weight_ori.shape[0]):
        sum_l1.append((i, np.sum(np.abs(weight_ori[i, :, :, :]))))
    de_keral = []  # indices of kernels whose L1 norm exceeds the threshold
    for idx, l1 in sum_l1:
        if l1 > threshold:
            de_keral.append(idx)
    print(layer + "層需要prune的卷積核有" + str(weight_ori.shape[0] - len(de_keral)) +
          "個,保留的卷積核有" + str(len(de_keral)) + "個")
    return de_keral
def prune(net, pk, lk):
    """Build the pruned weight/bias tensors for every affected layer.

    Args:
        net: a caffe.Net-like object exposing `net.params[name]`, where
            params[name][0].data is the weight blob and params[name][1].data
            the bias blob.
        pk: dict, pruned-layer name -> list of kept kernel indices
            (as returned by get_prune); these layers lose output channels.
        lk: dict, consumer-layer name -> list of kept indices of its
            *input* channels (the bottom layer was pruned).

    Returns:
        (w_new, b_new): two dicts keyed by layer name with the sliced
        numpy weight / bias arrays.
    """
    w_new = {}  # layer name -> pruned weights
    b_new = {}  # layer name -> pruned biases
    for l in pk.keys():
        # Pruned layer itself: keep only the selected output kernels (axis 0).
        w_old = net.params[l][0].data
        b_old = net.params[l][1].data
        w_new[l] = w_old[pk[l], :, :, :]
        b_new[l] = b_old[pk[l]]
    for l in lk.keys():
        # Layers fed by a pruned layer: drop the matching *input* channels.
        if l not in pk.keys():
            # The layer itself was not pruned, so slice the original blob.
            if l != "conv4_3_norm":
                # Ordinary convolution: slice input-channel axis (axis 1).
                w_o = net.params[l][0].data
                b_o = net.params[l][1].data
                b_new[l] = b_o  # bias untouched: no output kernels removed here
                w_new[l] = w_o[:, lk[l], :, :]
            else:
                # Special layer (e.g. SSD normalize): a single per-channel
                # scale vector, so index axis 0 directly.
                w_o = net.params[l][0].data
                w_new[l] = w_o[lk[l]]
        else:
            # Layer appears in both pk and lk: its outputs were already
            # pruned above, so slice the stored result, not the original net.
            w_o = w_new[l]
            w_new[l] = w_o[:, lk[l], :, :]
    return w_new, b_new
def?get_prototxt(pkpro_n):?#復制原來的prototxt并修改修剪層的num_output這一段代碼有點繞有空的話優化為幾個單獨的函數或者弄個類
????with?open(pro_n“r“)?as?p:
????????lines?=?p.readlines()
????k=0
????with?open(pro_n“w“)?as?p:
????????while?k?????????????if?‘name:‘?in?lines[k]:
????????????????l_name?=?lines[k].split(‘“‘)[1]?#獲取layer?name
????????????????if?l_name?in?pk.keys():?#如果name在待修剪層中則需要修改下面進入一個找channel的循環塊.
????????????????????while?True:
????????????????????????if?“num_output:“?in?lines[k]:
????????????????????????????channel_n?=?“????num_output:?“+str(len(pk[l_name]))+“\n“
????????????????????????????p.write(channel_n)
????????????????????????????k=k+1
???????????
評論
共有 條評論