import pandas as pd
import numpy as np

# ============ Deleting / filling null values ============
# In many cases, when you use Pandas to read in a lot of data, you will find
# that parts of the original data are incomplete.
# Wherever data is missing in a DataFrame, Pandas automatically fills in a
# null value such as NaN.
# We can choose to discard these auto-filled values with .dropna(),
# or fill the null slots with data of our own using .fillna().

# ------- Deleting --------
# 1. Sample data source (with null values)
dt_01 = {'A': [1, np.nan, 3],
         'B': [2, np.nan, np.nan],
         'C': [4, 5, 6]}
my_datafarme_01 = pd.DataFrame(dt_01)
#print(my_datafarme_01)

# 2. Calling .dropna() tells Pandas to delete any row (or column) that
# contains one or more null values.
# Delete rows with .dropna(axis=0); delete columns with .dropna(axis=1).
# Note that if you do not specify the axis parameter, rows are dropped by default.
#print(my_datafarme_01.dropna())
#print(my_datafarme_01.dropna(axis=0))
#print(my_datafarme_01.dropna(axis=1))

# --------- Filling all NaN --------
# Similarly, the .fillna() method makes Pandas fill every null value in the
# DataFrame with the default value you specify.
# For example, replace every NaN with 20:
#print(my_datafarme_01.fillna(20))

# --------- Filling selected NaN -----------
# Filling everything at once is often too blunt, so we can choose to fill the
# NaN values only in certain rows or columns:
col = ['A', 'B']
my_datafarme_01[col] = my_datafarme_01[col].fillna(10)
#print(my_datafarme_01)

# By the same token, .dropna() and .fillna() do not change your data
# permanently unless you pass the inplace=True parameter.
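# A quick sketch of that difference (using a throwaway frame so the tables
# above stay untouched): without inplace the call returns a modified copy;
# with inplace=True it alters the object itself and returns None.
demo_df = pd.DataFrame({'A': [1, np.nan, 3]})
demo_df.fillna(0)                 # returns a filled copy; demo_df still has NaN
demo_df.fillna(0, inplace=True)   # modifies demo_df in place, returns None
#print(demo_df)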
# =============== Grouped statistics ===============
# Pandas' grouping feature lets you group data rows by the contents of a
# column and apply a statistical function to each group, such as sum, mean,
# median, standard deviation, and so on.
# Example: we can use the .groupby() method to group by the 'Company' column,
# then use .mean() to average each group.

# ------- Data table -------
dt_01 = {'Company': ['GOOGLE', 'GOOGLE', 'ORACLE', 'ORACLE', 'TWITTER', 'TWITTER'],
         'Person': ['Sam', 'Charlie', 'Amy', 'Vanessa', 'Carl', 'Sarah'],
         'Sales': [200, 120, 340, 124, 243, 350]}
my_datafarme_02 = pd.DataFrame(dt_01)
#print(my_datafarme_02)

# -------- Grouping and averaging --------
# Call .groupby(), then chain .mean() to average each group:
#print(my_datafarme_02.groupby('Company').mean())
# Use .count() to count how many elements each group contains:
#print(my_datafarme_02.groupby('Company').count())
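# Caveat: on recent Pandas releases (2.0+), .mean() raises a TypeError if the
# group still contains non-numeric columns such as 'Person'. Two common ways
# around it (sketched against the table above):
#print(my_datafarme_02.groupby('Company').mean(numeric_only=True))
#print(my_datafarme_02.groupby('Company')['Sales'].mean())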
# ================== Data description ==============
# Pandas' .describe() method analyzes the data inside a DataFrame (only
# numeric columns are analyzed) and generates a set of descriptive statistics
# in one go, giving you an intuitive overview of the data.
# The generated metrics, from left to right, are: count, mean, standard
# deviation, minimum, the 25%/50%/75% quantiles, and maximum.
#print(my_datafarme_02.groupby('Company').describe())

# --------- Changing the display style (vertical layout) -----------
#print(my_datafarme_02.groupby('Company').describe().transpose())

# --------- Describing only a specified index -------
#print(my_datafarme_02.groupby('Company').describe().transpose()['GOOGLE'])
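# If you only want a few statistics instead of the full .describe() table,
# .agg() with a list of function names is a handy alternative (a sketch on the
# same grouped data):
#print(my_datafarme_02.groupby('Company')['Sales'].agg(['mean', 'std', 'max']))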
# ================== Stacking (concat) =============
# Stacking is basically a simple way of piling multiple DataFrames together
# into one bigger DataFrame.
# When you stack, pay close attention to your tables' indexes and the
# direction the columns extend in, and stack along the same direction.

# 1. --- Data source ----
dt_02 = pd.DataFrame(np.array(['A0','B0','C0','D0','A1','B1','C1','D1',
                               'A2','B2','C2','D2','A3','B3','C3','D3']).reshape(4, 4),
                     index=[0, 1, 2, 3], columns=['A', 'B', 'C', 'D'])
dt_03 = pd.DataFrame(np.array(['A4','B4','C4','D4','A5','B5','C5','D5',
                               'A6','B6','C6','D6','A7','B7','C7','D7']).reshape(4, 4),
                     index=[4, 5, 6, 7], columns=['A', 'B', 'C', 'D'])
dt_04 = pd.DataFrame(np.array(['A8','B8','C8','D8','A9','B9','C9','D9',
                               'A10','B10','C10','D10','A11','B11','C11','D11']).reshape(4, 4),
                     index=[8, 9, 10, 11], columns=['A', 'B', 'C', 'D'])
#print(dt_02)
#print(dt_03)
#print(dt_04)

# 2. --- Default stacking (by rows) -----
# We use pd.concat() to stack them into one large table:
#print(pd.concat([dt_02, dt_03, dt_04]))

# 3. --- Specifying the stacking direction (by columns) -----
#print(pd.concat([dt_02, dt_03, dt_04], axis=1))
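# Because dt_02/dt_03/dt_04 carry disjoint indexes (0-3, 4-7, 8-11), column-wise
# stacking (axis=1) aligns on the index and fills the gaps with NaN. For
# row-wise stacking, ignore_index=True discards the old indexes and rebuilds a
# fresh 0..n-1 index (a sketch on the tables above):
#print(pd.concat([dt_02, dt_03, dt_04], ignore_index=True))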
# =================== Merging (merge) ================
# (Used when the tables share many columns; merges on the common columns.)
# The pd.merge() function combines multiple DataFrames, in a way similar to
# merging SQL data tables.
# The basic syntax of a merge is: pd.merge(left, right, how='inner', on='Key').
# Here the left parameter is the left DataFrame and the right parameter is the
# right DataFrame.
# how='inner' controls what happens when the left and right DataFrames have
# keys that do not coincide: 'inner' takes the intersection, 'outer' the union.
# Finally, on='Key' names the key column to merge on; the whole table is
# merged according to that column.

# ------- Data source --------
left_data_01 = pd.DataFrame({'KEY': ['K0', 'K1', 'K2', 'K3'],
                             'A': ['A0', 'A1', 'A2', 'A3'],
                             'B': ['B0', 'B1', 'B2', 'B3']})
right_data_01 = pd.DataFrame({'KEY': ['K0', 'K1', 'K2', 'K3'],
                              'C': ['C0', 'C1', 'C2', 'C3'],
                              'D': ['D0', 'D1', 'D2', 'D3']})

# ------- Merging the two DataFrames ----- (equivalent to an inner join in a database)
#print(pd.merge(left_data_01, right_data_01, how='inner', on='KEY'))
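# For comparison, how='outer' keeps keys found in either table and how='left'
# keeps every key from the left table. With these two particular tables the
# key sets coincide exactly, so all variants return the same rows; the
# difference shows up once the key sets diverge (a sketch):
#print(pd.merge(left_data_01, right_data_01, how='outer', on='KEY'))
#print(pd.merge(left_data_01, right_data_01, how='left', on='KEY'))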
# -------- Multiple on parameters -------
left_data_02 = pd.DataFrame({'KEY1': ['K0', 'K0', 'K1', 'K2'],
                             'KEY2': ['K0', 'K1', 'K0', 'K1'],
                             'A': ['A0', 'A1', 'A2', 'A3'],
                             'B': ['B0', 'B1', 'B2', 'B3']})
#print(left_data_02)
right_data_02 = pd.DataFrame({'KEY1': ['K0', 'K1', 'K1', 'K2'],
                              'KEY2': ['K0', 'K0', 'K0', 'K0'],
                              'C': ['C0', 'C1', 'C2', 'C3'],
                              'D': ['D0', 'D1', 'D2', 'D3']})
#print(right_data_02)
#print(pd.merge(left_data_02, right_data_02, on='KEY1'))
#print(pd.merge(left_data_02, right_data_02, on='KEY2'))
#print(pd.merge(left_data_02, right_data_02, on=['KEY1', 'KEY2']))
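# When merging on 'KEY1' alone, the leftover 'KEY2' column exists in both
# tables, so Pandas disambiguates the two copies as KEY2_x / KEY2_y; the
# suffixes parameter lets you choose your own labels (a sketch):
#print(pd.merge(left_data_02, right_data_02, on='KEY1', suffixes=('_left', '_right')))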
# ================== Joining (join) =================
# (Used when the tables share few columns; merges on a common key.)
# Unlike .merge(), a join uses the index as the common key rather than a column.

# ------- Data source -------
left_data_03 = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
                             'B': ['B0', 'B1', 'B2']},
                            index=['K0', 'K1', 'K2'])
right_data_03 = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
                              'D': ['D0', 'D2', 'D3']},
                             index=['K0', 'K2', 'K3'])
#print(left_data_03)
#print(right_data_03)

# ------- Join --------- (a left join by default, based on the left table's index)
#print(left_data_03.join(right_data_03))

# ------- Intersection -------
#print(left_data_03.join(right_data_03, how='inner'))

# ------- Union -------
#print(left_data_03.join(right_data_03, how='outer'))
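# .join() also accepts how='right', which keeps the right table's index and
# mirrors the default left join (a sketch on the same two tables):
#print(left_data_03.join(right_data_03, how='right'))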
# ================== Finding unique values ===============
# Unique values are the distinct values in a DataFrame. Finding them helps you
# avoid sample bias in your analysis.
# In Pandas there are three main methods for this:

# ---- 1. First, the .unique() method. In the DataFrame below, look up all the
# unique values in the col2 column:
df = pd.DataFrame({'col1': [1, 2, 3, 4],
                   'col2': [444, 555, 666, 444],
                   'col3': ['abc', 'def', 'ghi', 'xyz']})
#print(df)
#print(df['col2'].unique())

# ================= Counting the unique values ===========
# ---- 2. Instead of listing the unique values themselves, we can use the
# .nunique() method to get how many there are:
#print(df['col2'].nunique())

# ================= Unique values and their counts ===========
# ---- 3. Alternatively, .value_counts() returns every value together with how
# many times it occurs:
#print(df['col2'].value_counts())
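# A related tool: .duplicated() flags repeated values as True instead of
# collapsing them, which makes it easy to filter (a sketch on the same column;
# only the second 444 is flagged):
#print(df['col2'].duplicated())
#print(df[~df['col2'].duplicated()])   # keep the first occurrence of each value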
# ================== Custom processing functions ===============
# The .apply() method lets you run a user-defined function over the data in a
# DataFrame.
# Example: define a square() function, then apply it to the col1 column:
def square(x):
    return x * x
#print(df['col1'].apply(square))

# ---- Using a built-in function -----
#print(df['col3'].apply(len))

# ---- Defining the function with a lambda expression ------
#print(df['col1'].apply(lambda x: x * x))
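# .apply() also works row by row: with axis=1 the function receives a whole
# row at a time, so several columns can be combined at once (a sketch on the
# same df):
#print(df.apply(lambda row: row['col1'] * row['col2'], axis=1))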
# ================== Getting DataFrame attributes ===========
# DataFrame attributes include the names of its columns and its index:
#print(df.columns)
#print(df.index)

# ================== Sorting =================
# To sort the whole table by the values of one column, use .sort_values():
# Note that the table's index still corresponds to the rows' positions before
# sorting; the original index is not lost by sorting.
#print(df.sort_values('col2'))
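# .sort_values() sorts in ascending order by default; pass ascending=False to
# reverse it, and chain .reset_index(drop=True) if you would rather have a
# fresh 0..n-1 index than the preserved one (a sketch):
#print(df.sort_values('col2', ascending=False))
#print(df.sort_values('col2').reset_index(drop=True))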
# ================ Finding null values ===============
# If you have a large data set, you can use Pandas' .isnull() method to find
# the null values in the table quickly and easily.
# It returns a new DataFrame of Boolean values (True/False) indicating whether
# the data at each position of the original DataFrame is null:
#print(df.isnull())
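# On a big table the Boolean frame itself is hard to scan; chaining .sum()
# counts the nulls per column, since True counts as 1 (a common idiom):
#print(df.isnull().sum())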
# ================ Pivot tables ===============
# A pivot table is a summary table: it shows summary statistics of the data in
# the original table.
# A Pandas pivot table can automatically group, slice, filter, sort, count,
# sum, or average your data, and present the results intuitively.

# ---- Data source --------
data_02 = {'A': ['Dog', 'Dog', 'Dog', 'Goat', 'Goat', 'Goat'],
           'B': ['Brown', 'Brown', 'Black', 'Black', 'Brown', 'Brown'],
           'C': ['x', 'y', 'x', 'y', 'x', 'y'],
           'D': [1, 3, 2, 5, 4, 1]}
df_02 = pd.DataFrame(data_02)
#print(df_02)

# ----- Pivot table ----
# The Pandas pivot-table syntax is .pivot_table(data, values='', index=[''], columns=['']):
# values is the column holding the data points we want to summarize,
# index says which column(s) to group into the index,
# and columns says which column's values the results are broken down by.
#print(pd.pivot_table(df_02, values='D', index=['A', 'B'], columns=['C']))

# The call above goes through the pandas module; you can also call the method
# directly on the DataFrame object:
#print(df_02.pivot_table(values='D', index=['A', 'B'], columns=['C']))
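# Combinations that never occur in the source data appear as NaN in the pivot
# table; fill_value substitutes a default for them, and aggfunc (mean by
# default) controls how duplicate combinations are summarized (a sketch):
#print(df_02.pivot_table(values='D', index=['A', 'B'], columns=['C'], fill_value=0))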
 