91av视频/亚洲h视频/操亚洲美女/外国一级黄色毛片 - 国产三级三级三级三级

資源簡介

資源截圖

代碼片段和文件信息


# coding: utf-8

# In[1]:


# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Jupyter magic so figures render inline in the notebook.
get_ipython().run_line_magic('matplotlib', 'inline')

# Load the Pima Indians diabetes dataset from a local CSV.
# NOTE(review): hard-coded absolute Windows path — adjust for your machine.
diabetes = pd.read_csv(r'C:\Users\Administrator\Desktop\diabetes\Machine-Learning-with-Python-master\diabetes.csv')
print(diabetes.columns)


#?In[2]:


# Peek at the first few rows.
diabetes.head()


# In[3]:


# "Outcome" is the label we will predict: 0 means no diabetes, 1 means
# diabetes. Of the 768 rows, 500 are labeled 0 and 268 are labeled 1.
print(diabetes.groupby('Outcome').size())


#?In[4]:


# Show the dimensions (rows, columns) of the data.
# Fixed the "dimennsion" typo in the original output message.
print("dimension of diabetes data:{}".format(diabetes.shape))


#?In[5]:


import seaborn as sns

# Bar chart of the class counts. The original call was missing the comma
# before label=; use the keyword form, which works on modern seaborn too.
sns.countplot(x='Outcome', data=diabetes, label="Count")


#?In[7]:


# Column dtypes, non-null counts, and memory usage overview.
diabetes.info()


#?In[70]:


# First use KNN to examine the relationship between model complexity
# (n_neighbors) and accuracy on a held-out test set.
from sklearn.model_selection import train_test_split

# Stratify on the label so train and test keep the same class ratio.
x_train, x_test, y_train, y_test = train_test_split(
    diabetes.loc[:, diabetes.columns != 'Outcome'],
    diabetes['Outcome'],
    stratify=diabetes['Outcome'],
    random_state=66)

from sklearn.neighbors import KNeighborsClassifier

training_accuracy = []
test_accuracy = []
# try n_neighbors from 1 to 10
neighbors_settings = range(1, 11)

for n_neighbors in neighbors_settings:
    # build the model
    knn = KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(x_train, y_train)
    # record training set accuracy
    training_accuracy.append(knn.score(x_train, y_train))
    # record test set accuracy
    test_accuracy.append(knn.score(x_test, y_test))

plt.plot(neighbors_settings, training_accuracy, label="training accuracy")
plt.plot(neighbors_settings, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
plt.savefig('knn_compare_model')


#?In[73]:


# Logistic regression with the default regularization C=1:
# ~78% accuracy on the training set, ~77% on the test set.
from sklearn.linear_model import LogisticRegression

logreg = LogisticRegression().fit(x_train, y_train)
# Report accuracy to three decimal places.
print("Training set score:{:.3f}".format(logreg.score(x_train, y_train)))
print("Test set score:{:.3f}".format(logreg.score(x_test, y_test)))


#?In[74]:


# With C=100 (weaker regularization) training accuracy rises slightly while
# test accuracy drops a little — a less-regularized, more complex model does
# not necessarily predict better than the default. So the default C=1 is kept.
logreg100 = LogisticRegression(C=100).fit(x_train, y_train)
print("Training set accuracy:{:.3f}".format(logreg100.score(x_train, y_train)))
print("Test set accuracy:{:.3f}".format(logreg100.score(x_test, y_test)))


#?In[77]:


# Visualize the coefficients learned with three regularization strengths.
# Stronger regularization (C=0.001) pushes coefficients toward zero. The
# "DiabetesPedigreeFunction" feature keeps a positive coefficient under
# C=100, C=1 and C=0.001 alike, i.e. for every model it is positively
# associated with the diabetes label.
diabetes_features = [x for i, x in enumerate(diabetes.columns) if i != 8]

# BUGFIX: the original cell plotted logreg100.coef_ twice (labeling the
# second copy "C=0.001"); fit the actual C=0.001 model here instead.
logreg001 = LogisticRegression(C=0.001).fit(x_train, y_train)

plt.figure(figsize=(8, 6))
plt.plot(logreg.coef_.T, 'o', label="C=1")
plt.plot(logreg100.coef_.T, '^', label="C=100")
plt.plot(logreg001.coef_.T, 'v', label="C=0.001")
plt.xticks(range(diabetes.shape[1]), diabetes_features, rotation=90)
plt.hlines(0, 0, diabetes.shape[1])
plt.ylim(-5, 5)
plt.xlabel("Feature")
plt.ylabel("Coefficient magnitude")
plt.legend()
# Fixed the '1og_coef' (digit one) typo in the saved-figure name.
plt.savefig('log_coef')



#?In[71]:


#決策樹算法
from?sklearn.tree?import

 屬性            大小     日期       時間   名稱
-----------  ---------  ----------  -----  ----
     文件      137092  2018-07-24  15:30  DiabetesArithmetic_version3.0\DiabetesArithmetic_version3.0.ipynb
     文件       12481  2018-07-24  15:30  DiabetesArithmetic_version3.0\DiabetesArithmetic_version3.0.py
     文件       23875  2018-07-21  16:01  DiabetesArithmetic_version3.0\diabetes.csv
     目錄           0  2018-07-24  15:32  DiabetesArithmetic_version3.0\

評論

共有 條評論