<> Common library imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings

warnings.filterwarnings("ignore")
pd.options.display.max_columns = None  # Show all columns
pd.set_option('display.float_format', lambda x: '%.2f' % x)  # Disable scientific notation
<>1, Data integration (concat)
train_data = pd.read_csv('training30.csv')
test_data = pd.read_csv('test30.csv')
total_data = pd.concat([train_data, test_data], axis=0)
total_data.info()
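concat stacks the two frames vertically, so the original row labels are kept and may repeat. A minimal sketch with two made-up frames (not the course data) showing how ignore_index avoids duplicate labels and how the original split can be recovered by length:

df_a = pd.DataFrame({'x': [1, 2]})
df_b = pd.DataFrame({'x': [3]})
combined = pd.concat([df_a, df_b], axis=0, ignore_index=True)  # fresh 0..n-1 index
train_part = combined.iloc[:len(df_a)]   # first len(df_a) rows came from df_a
test_part = combined.iloc[len(df_a):]    # remaining rows came from df_b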

<>2, Data filtering
cdma = pd.read_csv('cdma.xls', encoding='gbk', sep='\t')
print(cdma.shape)
cdma = cdma[(cdma['Regional sales bureau'] == 'Pudong Telecom Bureau')
            & (cdma['Channel management segmentation'].isin(['Exclusive channel', 'Medium and small channels', 'Open channels']))]
print(cdma.shape)
# cdma = cdma[~cdma['Name of Development Department'].str.contains('Forever')]
# print(cdma.shape)
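The same row-filtering pattern on a small illustrative frame (column names and values are made up): conditions are combined with &, each wrapped in parentheses, and a substring match is negated with ~.

demo = pd.DataFrame({'bureau': ['A', 'A', 'B'],
                     'channel': ['retail', 'online', 'retail']})
mask = (demo['bureau'] == 'A') & (demo['channel'].isin(['retail', 'online']))
filtered = demo[mask]
filtered = filtered[~filtered['channel'].str.contains('online')]  # drop rows whose channel mentions 'online'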

<>3, Data matching (merge)
match_table = pd.read_excel('Data description and matching formula.xlsx', sheet_name='Department matching table')
new_cdma = cdma.merge(match_table, how='left', on=['Name of Development Department', 'Channel management segmentation'])
new_cdma = new_cdma[new_cdma['Channel management segmentation'] == 'Exclusive channel']
new_cdma[['Statistics date', 'order number', 'Department', 'Agent', 'Branch', 'Channel Manager']].head()
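After a left merge it is worth checking how many rows failed to match. A minimal sketch with hypothetical frames, using the indicator argument of merge:

left = pd.DataFrame({'dept': ['a', 'b'], 'orders': [10, 5]})
right = pd.DataFrame({'dept': ['a'], 'branch': ['east']})
merged = left.merge(right, how='left', on='dept', indicator=True)
print(merged['_merge'].value_counts())  # rows marked 'left_only' found no match in the lookup table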

<>4, PivotTable report (pivot_table)
cdma_pivot = new_cdma.pivot_table(index='Agent', values='order number', columns='Branch',
                                  aggfunc='count', fill_value=0, margins=True, margins_name='total')
cdma_pivot
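The same count table (minus the margins row and column) can also be built with groupby plus unstack, which makes a useful cross-check; this sketch reuses the column names assumed above:

counts = (new_cdma.groupby(['Agent', 'Branch'])['order number']
          .count()
          .unstack(fill_value=0))  # one row per Agent, one column per Branch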

<>5, Data sorting (sort_values)
cdma_pivot.sort_values(by='total', ascending=False, inplace=True)
cdma_pivot

<>6, Data replacement (replace)
train_data = train_data.replace('?', np.nan)  # Exact match: replaces whole cell values equal to '?'
train_data.head(10)
train_data2 = train_data.replace('Tai', 'Cy', regex=True)  # Fuzzy match: replaces the substring via regex
train_data2.head(10)
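replace also accepts a mapping, which is handy for recoding several values in one call; a small sketch with made-up labels:

demo = pd.Series(['low', 'med', 'high', '?'])
recoded = demo.replace({'?': np.nan, 'med': 'medium'})  # each key is replaced by its value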

<>7, Data deletion (dropna)
print(train_data.shape)
train_data3 = train_data.dropna(subset=['gender', 'age'])
print(train_data3.shape)
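Beyond subset, dropna can also keep rows by how many non-missing values they contain; a brief illustrative sketch:

demo = pd.DataFrame({'a': [1, np.nan, np.nan], 'b': [1, 2, np.nan]})
demo.dropna(how='all')    # drop only rows where every value is missing
demo.dropna(thresh=2)     # keep rows with at least 2 non-missing values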

<>8, Downsampling
def lower_sample_data(df, labelname, percent=1):
    '''
    percent: ratio of majority-class samples to keep per minority-class sample
    '''
    data1 = df[df[labelname] == 1]  # minority-class samples
    data0 = df[df[labelname] == 0]  # majority-class samples
    # Draw random row positions from the majority class (sampled with replacement)
    index = np.random.randint(len(data0), size=percent * len(data1))
    lower_data0 = data0.iloc[list(index)]  # down-sampled majority class
    return pd.concat([lower_data0, data1])

print(train_data["'Purchase or not'"].value_counts())
train_data4 = lower_sample_data(train_data, "'Purchase or not'", percent=1)
print(train_data4["'Purchase or not'"].value_counts())

<>9, Missing value processing (fillna)
train_data5 = pd.read_csv('cs-training.csv')
per_columns = [c for c in train_data5.columns if c not in ('CustomerID', 'SeriousDlqin2yrs')]
for column in per_columns:
    temp_mean = train_data5[column].mean()  # use median() for a median fill, mode() for the most frequent value
    train_data5[column] = train_data5[column].fillna(temp_mean)
train_data5.describe()
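As the comment notes, the fill statistic is interchangeable; a minimal sketch of median and mode fills on an illustrative series:

demo = pd.Series([1.0, np.nan, 3.0, 3.0])
demo.fillna(demo.median())         # median fill
demo.fillna(demo.mode().iloc[0])   # mode fill (mode() returns a Series, take the first value)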

<>10, Noise treatment

<> Method 1 : Quantile method
def cap(x, quantile=[0.05, 0.95]):
    """Cap method for handling outliers.
    Args:
        x: pd.Series, a continuous variable
        quantile: lower and upper quantiles used as the caps
    """
    # Compute the cap values
    Q05, Q95 = x.quantile(quantile).values.tolist()
    # Replace values outside the caps with the cap values
    if Q05 > x.min():
        x = x.copy()
        x.loc[x < Q05] = Q05
    if Q95 < x.max():
        x = x.copy()
        x.loc[x > Q95] = Q95
    return x

train_data6 = train_data5[per_columns]
train_data6 = train_data6.apply(cap)
train_data7 = pd.concat([train_data5[['CustomerID', 'SeriousDlqin2yrs']], train_data6], axis=1)
train_data7 = train_data7[train_data5.columns]
train_data7.describe()
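pandas' built-in clip does the same capping in a single call; a minimal equivalent sketch:

def cap_clip(x, quantile=(0.05, 0.95)):
    low, high = x.quantile(list(quantile))
    return x.clip(lower=low, upper=high)  # values below/above the quantiles are set to the caps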

<> Method 2 : Mean ± 3 standard deviations method
def cap_mean(x):
    """Cap method for handling outliers.
    Args:
        x: pd.Series, a continuous variable
    """
    # Upper and lower bounds: mean +/- 3 standard deviations
    x_up = x.mean() + 3 * x.std()
    x_down = x.mean() - 3 * x.std()
    # Replace outliers with the bounds
    if x_down > x.min():
        x = x.copy()
        x.loc[x < x_down] = x_down
    if x_up < x.max():
        x = x.copy()
        x.loc[x > x_up] = x_up
    return x

train_data8 = train_data5[per_columns]
train_data8 = train_data8.apply(cap_mean)
train_data9 = pd.concat([train_data5[['CustomerID', 'SeriousDlqin2yrs']], train_data8], axis=1)
train_data9 = train_data9[train_data5.columns]
train_data9.describe()

<>11, Data normalization / Standardization
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler

mm_scaler = MinMaxScaler()
ss_scaler = StandardScaler()

print(train_data9['age'].head())
train_data9['age'] = mm_scaler.fit_transform(train_data9[['age']])
print(train_data9['age'].head())
print('-------------------------------------------------')
print(train_data9['MonthlyIncome'].head())
train_data9['MonthlyIncome'] = ss_scaler.fit_transform(train_data9[['MonthlyIncome']])
print(train_data9['MonthlyIncome'].head())
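Both scalers also work on several columns at once and can undo the transform; a brief sketch reusing the column names and scaler from above:

cols = ['age', 'MonthlyIncome']
scaled = ss_scaler.fit_transform(train_data9[cols])   # numpy array, one column per input column
restored = ss_scaler.inverse_transform(scaled)        # recovers the values on the original scale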

<>12, Data generalization (map)
print(cdma['Development channels'].value_counts())
qd_map = {'Self-operated business hall': 'Self-operated channel',
          'Franchise store': 'Exclusive channel',
          'Cooperative business hall': 'Exclusive channel',
          'Core channel zone counter': 'Exclusive channel',
          'Tianyi store': 'Medium and small channels',
          'Outsourcing business hall': 'Exclusive channel',
          'National chain stores': 'Open channels',
          'All-network phone store (exclusive)': 'Exclusive channel',
          'Business district store': 'Exclusive channel',
          'Tianyi cooperative store': 'Medium and small channels',
          'Terminal retail store (developing)': 'Medium and small channels'}
cdma_2 = cdma.copy()
cdma_2['Channel statistics classification'] = cdma_2['Development channels'].map(qd_map)
print(cdma_2['Channel statistics classification'].value_counts())
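map returns NaN for any category missing from the dictionary; a short sketch of a safe pattern that falls back to the original value for unmapped categories:

mapped = cdma_2['Development channels'].map(qd_map)
cdma_2['Channel statistics classification'] = mapped.fillna(cdma_2['Development channels'])  # keep unmapped values unchanged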

<>13, Label encoding (LabelEncoder)
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
cdma_2['Channel statistics classification'] = le.fit_transform(cdma_2['Channel statistics classification'])  # expects a 1-D Series, not [[...]]
cdma_2['Channel statistics classification'].value_counts()
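The fitted encoder keeps the original labels, so the integer codes can be decoded again; a short sketch using the encoder fitted above:

print(le.classes_)                        # original category names, in the order of their integer codes
codes = cdma_2['Channel statistics classification']
decoded = le.inverse_transform(codes)     # back to the original label strings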

<>14, Data discretization (cut/qcut)

<> Method 1 : Manually specified bin edges
age_range = list(range(0, 111, 10))
train_data5['age_cut1'] = pd.cut(train_data5['age'], age_range, include_lowest=True, right=False)
train_data5['age_cut1'].value_counts().sort_index()

<> Method 2 : Equal-width binning
train_data5['age_cut2'] = pd.cut(train_data5['age'], bins=10, include_lowest=True, right=False, precision=0)
train_data5['age_cut2'].value_counts().sort_index()

<> Method 3 : Equal-frequency (equal-depth) binning
train_data5['age_cut3'] = pd.qcut(train_data5['age'], 10, precision=1)
train_data5['age_cut3'].value_counts().sort_index()
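Both cut and qcut accept a labels argument so the bins get readable names instead of interval objects; a minimal sketch with made-up breakpoints and a hypothetical age_group column:

bins = [0, 18, 40, 65, 120]
names = ['minor', 'young adult', 'middle-aged', 'senior']
train_data5['age_group'] = pd.cut(train_data5['age'], bins=bins, labels=names, right=False)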
