# required imports for the binning functions below
import pandas as pd
import numpy as np
from scipy.stats import chi2

def calc_chiSquare(sampleSet, feature, target):
    '''
    Compute the chi-square statistic for every value of a given feature.
    params:
        sampleSet: sample DataFrame
        feature: feature to analyse
        target: target column name; the target Y must be binary (0 or 1)
    return:
        DataFrame of chi-square statistics with columns:
        feature: feature value
        act_target_cnt: actual number of bad samples
        expected_target_cnt: expected number of bad samples
        chi_square: chi-square statistic
    '''
    # expected bad-sample ratio over the whole sample
    target_cnt = sampleSet[target].sum()
    sample_cnt = len(sampleSet[target])
    expected_ratio = target_cnt * 1.0 / sample_cnt
    # sort the distinct feature values in ascending order
    df = sampleSet[[feature, target]]
    col_value = sorted(set(df[feature]))
    # compute the chi-square statistic for each feature value
    chi_list = []; target_list = []; expected_target_list = []
    for value in col_value:
        df_target_cnt = df.loc[df[feature] == value, target].sum()
        df_cnt = len(df.loc[df[feature] == value, target])
        expected_target_cnt = df_cnt * expected_ratio
        chi_square = (df_target_cnt - expected_target_cnt)**2 / expected_target_cnt
        chi_list.append(chi_square)
        target_list.append(df_target_cnt)
        expected_target_list.append(expected_target_cnt)
    # collect the results: feature value, chi-square, actual and expected bad counts
    chi_stats = pd.DataFrame({feature: col_value, 'chi_square': chi_list,
                              'act_target_cnt': target_list, 'expected_target_cnt': expected_target_list})
    return chi_stats[[feature, 'act_target_cnt', 'expected_target_cnt', 'chi_square']]
def chiMerge_maxInterval(chi_stats, feature, maxInterval=5):
    '''
    ChiMerge binning -- merge until a maximum number of intervals is reached.
    params:
        chi_stats: DataFrame of chi-square statistics
        feature: feature to bin
        maxInterval: maximum number of bins
    return:
        merged chi-square DataFrame and the list of split points split_list
    '''
    group_cnt = len(chi_stats)
    split_list = [chi_stats[feature].min()]
    # if there are more intervals than allowed, keep merging by the ChiMerge rule
    while group_cnt > maxInterval:
        min_index = chi_stats[chi_stats['chi_square'] == chi_stats['chi_square'].min()].index.tolist()[0]
        # if the interval with the smallest chi-square is the first one, merge it with the next one
        if min_index == 0:
            chi_stats = merge_chiSquare(chi_stats, min_index+1, min_index)
        # if it is the last one, merge it with the previous one
        elif min_index == group_cnt-1:
            chi_stats = merge_chiSquare(chi_stats, min_index-1, min_index)
        # otherwise merge it with whichever neighbour has the smaller chi-square
        else:
            if chi_stats.loc[min_index-1, 'chi_square'] > chi_stats.loc[min_index+1, 'chi_square']:
                chi_stats = merge_chiSquare(chi_stats, min_index, min_index+1)
            else:
                chi_stats = merge_chiSquare(chi_stats, min_index-1, min_index)
        group_cnt = len(chi_stats)
    chiMerge_result = chi_stats
    split_list.extend(chiMerge_result[feature].tolist())
    return chiMerge_result, split_list
def chiMerge_minChiSquare(chi_stats, feature, dfree=4, cf=0.1, maxInterval=5):
    '''
    ChiMerge binning -- merge until the minimum chi-square exceeds a threshold.
    params:
        chi_stats: DataFrame of chi-square statistics
        feature: feature to bin
        maxInterval: maximum number of bins, default 5
        dfree: degrees of freedom, maximum number of bins - 1, default 4
        cf: significance level, default 10%
    return:
        merged chi-square DataFrame and the list of split points split_list
    '''
    threshold = get_chiSquare_distribution(dfree, cf)
    min_chiSquare = chi_stats['chi_square'].min()
    group_cnt = len(chi_stats)
    split_list = [chi_stats[feature].min()]
    # keep merging while the smallest chi-square is below the threshold
    # and there are still more intervals than allowed
    while min_chiSquare < threshold and group_cnt > maxInterval:
        min_index = chi_stats[chi_stats['chi_square'] == chi_stats['chi_square'].min()].index.tolist()[0]
        # if the interval with the smallest chi-square is the first one, merge it with the next one
        if min_index == 0:
            chi_stats = merge_chiSquare(chi_stats, min_index+1, min_index)
        # if it is the last one, merge it with the previous one
        elif min_index == group_cnt-1:
            chi_stats = merge_chiSquare(chi_stats, min_index-1, min_index)
        # otherwise merge it with whichever neighbour has the smaller chi-square
        else:
            if chi_stats.loc[min_index-1, 'chi_square'] > chi_stats.loc[min_index+1, 'chi_square']:
                chi_stats = merge_chiSquare(chi_stats, min_index, min_index+1)
            else:
                chi_stats = merge_chiSquare(chi_stats, min_index-1, min_index)
        min_chiSquare = chi_stats['chi_square'].min()
        group_cnt = len(chi_stats)
    chiMerge_result = chi_stats
    split_list.extend(chiMerge_result[feature].tolist())
    return chiMerge_result, split_list
def get_chiSquare_distribution(dfree=4, cf=0.1):
    '''
    Look up the chi-square threshold for the given degrees of freedom and significance level.
    params:
        dfree: degrees of freedom, maximum number of bins - 1, default 4
        cf: significance level, default 10%
    return:
        chi-square threshold
    '''
    percents = [0.95, 0.90, 0.5, 0.1, 0.05, 0.025, 0.01, 0.005]
    df = pd.DataFrame(np.array([chi2.isf(percents, df=i) for i in range(1, 30)]))
    df.columns = percents
    df.index = df.index + 1
    # limit the number of displayed decimal places
    pd.set_option('display.precision', 3)
    return df.loc[dfree, cf]
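The table built above is handy if you want to inspect several significance levels at once; for a single threshold the same number can be read directly from scipy's inverse survival function (a one-line equivalent, using the chi2 already imported above):

threshold = chi2.isf(0.1, df=4)  # ~7.779, the same value as get_chiSquare_distribution(dfree=4, cf=0.1)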
def merge_chiSquare(chi_result, index, mergeIndex, a='expected_target_cnt',
                    b='act_target_cnt', c='chi_square'):
    '''
    Merge two adjacent intervals of the chi-square DataFrame.
    params:
        chi_result: chi-square DataFrame to merge
        index: row whose counts are merged away (dropped after the merge)
        mergeIndex: row that receives the merged counts
        a, b, c: columns to merge (expected count, actual count, chi-square)
    return:
        chi-square DataFrame after the merge
    '''
    chi_result.loc[mergeIndex, a] = chi_result.loc[mergeIndex, a] + chi_result.loc[index, a]
    chi_result.loc[mergeIndex, b] = chi_result.loc[mergeIndex, b] + chi_result.loc[index, b]
    # recompute the chi-square of the merged interval: (actual - expected)^2 / expected
    chi_result.loc[mergeIndex, c] = (chi_result.loc[mergeIndex, b] - chi_result.loc[mergeIndex, a])**2 / chi_result.loc[mergeIndex, a]
    chi_result = chi_result.drop([index])
    chi_result = chi_result.reset_index(drop=True)
    return chi_result
for col in bin_col:
    chi_stats = calc_chiSquare(exp_f_data_label_dr, col, 'label')
    chiMerge_result, split_list = chiMerge_maxInterval(chi_stats, col, maxInterval=5)
    print(col, 'feature maybe split like this:', split_list)
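The split points returned above are the lower edges of the merged intervals. A minimal sketch of how they might be applied, assuming the same exp_f_data_label_dr frame and 'label' column as above; the chiMerge_minChiSquare variant and the col + '_bin' output column are illustrative, not part of the original code:

for col in bin_col:
    chi_stats = calc_chiSquare(exp_f_data_label_dr, col, 'label')
    # threshold-driven variant: stop merging once every chi-square exceeds the
    # critical value for dfree degrees of freedom at significance level cf
    merged, split_list = chiMerge_minChiSquare(chi_stats, col, dfree=4, cf=0.1, maxInterval=5)
    # turn the split points into bin edges (open on the right) and apply them
    bins = sorted(set(split_list)) + [np.inf]
    exp_f_data_label_dr[col + '_bin'] = pd.cut(exp_f_data_label_dr[col], bins=bins,
                                               include_lowest=True)

Deduplicating and sorting the split points guards against the repeated minimum value that split_list can contain, and the np.inf edge keeps the last interval open on the right.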
2018.08.02
In R, smbinning can do optimal binning; in Python, a binning has to balance the number of bins, the amount of information retained after binning, and other factors such as monotonicity.
Here is a simple way to choose between binnings via the IV (information value); a sketch of the IV calculation is given after these notes.
The example below bins by quantiles; chi-square binning, decision-tree binning and so on can also be used.
See also the binning in toad, a fairly standard open-source scorecard development package developed by 厚本金融.
R package: smbinning (CRAN - Package smbinning).
For SAS, the book 信用风险评分卡研究 (see its Douban page), p. 140, mentions a SAS macro for automatic binning; the SAS code is in the book's appendix.
There is also a write-up of three decision-tree approaches; the idea behind automatic binning is essentially to grow a single-variable decision tree (Decision Tree Algorithms!).
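As a sketch of the IV-based comparison mentioned above: compute the WOE of each bin and sum (bad share - good share) * WOE; the candidate binning with the larger IV retains more information about the target. The helper iv_from_bins, the bin column names, and the reuse of exp_f_data_label_dr, the bin edges bins and the 'label' target from the earlier sketches are assumptions for illustration:

def iv_from_bins(frame, bin_col, target='label'):
    # bad (target == 1) and good (target == 0) counts per bin
    grouped = frame.groupby(bin_col)[target].agg(['sum', 'count'])
    bad = grouped['sum']
    good = grouped['count'] - grouped['sum']
    # share of all bads / goods falling in each bin (small constant avoids log(0))
    bad_dist = (bad + 1e-6) / bad.sum()
    good_dist = (good + 1e-6) / good.sum()
    woe = np.log(bad_dist / good_dist)
    return ((bad_dist - good_dist) * woe).sum()

# compare a quantile binning against the ChiMerge splits for one column
df = exp_f_data_label_dr
df['q_bin'] = pd.qcut(df[col], q=5, duplicates='drop')           # quantile bins
df['chi_bin'] = pd.cut(df[col], bins=bins, include_lowest=True)  # ChiMerge bins
print(col, 'IV quantile:', iv_from_bins(df, 'q_bin'),
      'IV ChiMerge:', iv_from_bins(df, 'chi_bin'))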
Monotonic Binning with Python
Monotonic binning is a data preparation technique widely used in scorecard development and is usually implemented with SAS. Below is an attempt to do the monotonic binning with python.
Python Code:
# import packages
import pandas as pd
import numpy as np
import scipy.stats.stats as stats
# import data
data = pd.read_csv("/home/liuwensui/Documents/data/accepts.csv", sep = ",", header = 0)
# define a binning function
def mono_bin(Y, X, n = 20):
    # fill missings with median
    X2 = X.fillna(np.median(X))
    r = 0
    # reduce the number of quantile buckets until the bucket means of X and Y
    # are perfectly rank-correlated (|Spearman r| == 1), i.e. the bins are monotonic
    while np.abs(r) < 1:
        d1 = pd.DataFrame({"X": X2, "Y": Y, "Bucket": pd.qcut(X2, n)})
        d2 = d1.groupby('Bucket', as_index = True)
        r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)
        n = n - 1
    d3 = pd.DataFrame(d2.min().X, columns = ['min_' + X.name])
    d3['max_' + X.name] = d2.max().X
    d3[Y.name] = d2.sum().Y
    d3['total'] = d2.count().Y
    d3[Y.name + '_rate'] = d2.mean().Y
    d4 = (d3.sort_values(by = 'min_' + X.name)).reset_index(drop = True)
    print("=" * 60)
    print(d4)
mono_bin(data.bad, data.ltv)
mono_bin(data.bad, data.bureau_score)
mono_bin(data.bad, data.age_oldest_tr)
mono_bin(data.bad, data.tot_tr)
mono_bin(data.bad, data.tot_income)
Output:
============================================================
min_ltv max_ltv bad total bad_rate
0 0 83 88 884 0.099548
1 84 92 137 905 0.151381
2 93 98 175 851 0.205640
3 99 102 173 814 0.212531
4 103 108 194 821 0.236297
5 109 116 194 769 0.252276
6 117 176 235 793 0.296343
============================================================
min_bureau_score max_bureau_score bad total bad_rate
0 443 630 325 747 0.435074
1 631 655 242 721 0.335645
2 656 676 173 721 0.239945
3 677 698 245 1059 0.231350
4 699 709 64 427 0.149883
5 710 732 73 712 0.102528
6 733 763 53 731 0.072503
7 764 848 21 719 0.029207
============================================================
min_age_oldest_tr max_age_oldest_tr bad total bad_rate
0 1 59 319 987 0.323202
1 60 108 235 975 0.241026
2 109 142 282 1199 0.235196
3 143 171 142 730 0.194521
4 172 250 125 976 0.128074
5 251 588 93 970 0.095876
============================================================
min_tot_tr max_tot_tr bad total bad_rate
0 0 8 378 1351 0.279793
1 9 13 247 1025 0.240976
2 14 18 240 1185 0.202532
3 19 25 165 1126 0.146536
4 26 77 166 1150 0.144348
============================================================
min_tot_income max_tot_income bad total bad_rate
0 0.00 2000.00 323 1217 0.265407
1 2002.00 2916.67 259 1153 0.224631
2 2919.00 4000.00 226 1150 0.196522
3 4001.00 5833.33 231 1186 0.194772
4 5833.34 8147166.66 157 1131 0.138815