Pandas 10分钟入门(官方说明+个人小测试)

来源:互联网 发布:国债期货 知乎 编辑:程序博客网 时间:2024/05/24 23:16

Pandas 10分钟入门


代码下载地址[http://download.csdn.net/download/sirwill/10043185]

In [19]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

Object Creation

In [20]:
s = pd.Series([1,2,3,np.nan,5,6,])   # series 类型数组。
s
Out[20]:
0    1.01    2.02    3.03    NaN4    5.05    6.0dtype: float64
In [21]:
dates = pd.date_range("20170112", periods=6)  # Creating a DataFrame by passing a numpy array, with a datetime index and labeled columns
dates
Out[21]:
DatetimeIndex(['2017-01-12', '2017-01-13', '2017-01-14', '2017-01-15',               '2017-01-16', '2017-01-17'],              dtype='datetime64[ns]', freq='D')
In [22]:
list(dates)
dates.date
Out[22]:
array([datetime.date(2017, 1, 12), datetime.date(2017, 1, 13),       datetime.date(2017, 1, 14), datetime.date(2017, 1, 15),       datetime.date(2017, 1, 16), datetime.date(2017, 1, 17)], dtype=object)
In [23]:
list(dates.date)
Out[23]:
[datetime.date(2017, 1, 12), datetime.date(2017, 1, 13), datetime.date(2017, 1, 14), datetime.date(2017, 1, 15), datetime.date(2017, 1, 16), datetime.date(2017, 1, 17)]
In [24]:
dates.year
Out[24]:
Int64Index([2017, 2017, 2017, 2017, 2017, 2017], dtype='int64')
In [25]:
list(dates.year)
Out[25]:
[2017, 2017, 2017, 2017, 2017, 2017]
In [26]:
list(dates.day)
Out[26]:
[12, 13, 14, 15, 16, 17]
In [27]:
str(dates.date)
Out[27]:
'[datetime.date(2017, 1, 12) datetime.date(2017, 1, 13)\n datetime.date(2017, 1, 14) datetime.date(2017, 1, 15)\n datetime.date(2017, 1, 16) datetime.date(2017, 1, 17)]'
In [28]:
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list("ABCD"))
df
Out[28]:
 ABCD2017-01-12-2.2581212.4561960.778567-2.0304072017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-16-1.3233043.1436590.6389960.8986832017-01-17-0.0249350.385811-1.577185-0.021460
In [29]:
df2 = pd.DataFrame({ 'A' : 1.,
                     'B' : pd.Timestamp('20130102'),
                     'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
                     'D' : np.array([3] * 4,dtype='int32'),
                     'E' : pd.Categorical(["test","train","test","train"]),
                     'F' : 'foo' })  # Creating a DataFrame by passing a dict of objects that can be converted to series-like.
df2
Out[29]:
 ABCDEF01.02013-01-021.03testfoo11.02013-01-021.03trainfoo21.02013-01-021.03testfoo31.02013-01-021.03trainfoo
In [30]:
df2.dtypes
Out[30]:
A           float64B    datetime64[ns]C           float32D             int32E          categoryF            objectdtype: object
In [31]:
df.dtypes
Out[31]:
A    float64B    float64C    float64D    float64dtype: object
In [32]:
df2.<TAB> #使用jupyter时按tab键,可以看到代码提示。
  File "<ipython-input-32-9c4c8dafe199>", line 1    df2.<TAB> #If you’re using IPython, tab completion for column names (as well as public attributes) is automatically enabled.        ^SyntaxError: invalid syntax

Viewing Data

In [36]:
df.head()
Out[36]:
 ABCD2017-01-12-2.2581212.4561960.778567-2.0304072017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-16-1.3233043.1436590.6389960.898683
In [37]:
df.index
Out[37]:
DatetimeIndex(['2017-01-12', '2017-01-13', '2017-01-14', '2017-01-15',               '2017-01-16', '2017-01-17'],              dtype='datetime64[ns]', freq='D')
In [38]:
df.columns
Out[38]:
Index(['A', 'B', 'C', 'D'], dtype='object')
In [39]:
df.values
Out[39]:
array([[-2.2581213 ,  2.45619592,  0.77856734, -2.030407  ],       [-0.65834822,  0.62249451,  0.38862467,  0.07358728],       [ 0.58921899,  1.39279193,  0.60554535,  1.23153815],       [-0.1519579 , -0.65524863, -2.1147252 , -0.66983949],       [-1.32330447,  3.14365936,  0.63899562,  0.89868346],       [-0.02493461,  0.3858107 , -1.57718486, -0.0214603 ]])
In [40]:
df.describe()
Out[40]:
 ABCDcount6.0000006.0000006.0000006.000000mean-0.6379081.224284-0.213363-0.086316std1.0210781.4019871.2820791.171045min-2.258121-0.655249-2.114725-2.03040725%-1.1570650.444982-1.085732-0.50774550%-0.4051531.0076430.4970850.02606375%-0.0566902.1903450.6306330.692409max0.5892193.1436590.7785671.231538
In [41]:
df
Out[41]:
 ABCD2017-01-12-2.2581212.4561960.778567-2.0304072017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-16-1.3233043.1436590.6389960.8986832017-01-17-0.0249350.385811-1.577185-0.021460
In [42]:
df.T
Out[42]:
 2017-01-12 00:00:002017-01-13 00:00:002017-01-14 00:00:002017-01-15 00:00:002017-01-16 00:00:002017-01-17 00:00:00A-2.258121-0.6583480.589219-0.151958-1.323304-0.024935B2.4561960.6224951.392792-0.6552493.1436590.385811C0.7785670.3886250.605545-2.1147250.638996-1.577185D-2.0304070.0735871.231538-0.6698390.898683-0.021460
In [43]:
df.sort_index(axis=1,ascending=False) #Sorting by an axis  排序。
Out[43]:
 DCBA2017-01-12-2.0304070.7785672.456196-2.2581212017-01-130.0735870.3886250.622495-0.6583482017-01-141.2315380.6055451.3927920.5892192017-01-15-0.669839-2.114725-0.655249-0.1519582017-01-160.8986830.6389963.143659-1.3233042017-01-17-0.021460-1.5771850.385811-0.024935
In [44]:
df.sort_values(by="B") #Sorting by values
Out[44]:
 ABCD2017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-17-0.0249350.385811-1.577185-0.0214602017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-12-2.2581212.4561960.778567-2.0304072017-01-16-1.3233043.1436590.6389960.898683
In [45]:
df
Out[45]:
 ABCD2017-01-12-2.2581212.4561960.778567-2.0304072017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-16-1.3233043.1436590.6389960.8986832017-01-17-0.0249350.385811-1.577185-0.021460

Selection

Getting

In [46]:
df["A"]# Selecting a single column, which yields a Series, equivalent to df.A
Out[46]:
2017-01-12   -2.2581212017-01-13   -0.6583482017-01-14    0.5892192017-01-15   -0.1519582017-01-16   -1.3233042017-01-17   -0.024935Freq: D, Name: A, dtype: float64
In [47]:
df.A
Out[47]:
2017-01-12   -2.2581212017-01-13   -0.6583482017-01-14    0.5892192017-01-15   -0.1519582017-01-16   -1.3233042017-01-17   -0.024935Freq: D, Name: A, dtype: float64
In [48]:
df[0:3]  #Selecting via [], which slices the rows.
Out[48]:
 ABCD2017-01-12-2.2581212.4561960.778567-2.0304072017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.231538
In [49]:
df["2017-01-13":"2017-01-17"]
Out[49]:
 ABCD2017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-16-1.3233043.1436590.6389960.8986832017-01-17-0.0249350.385811-1.577185-0.021460

Selection by Label

In [50]:
dates
Out[50]:
DatetimeIndex(['2017-01-12', '2017-01-13', '2017-01-14', '2017-01-15',               '2017-01-16', '2017-01-17'],              dtype='datetime64[ns]', freq='D')
In [51]:
df.loc[dates[0]] #For getting a cross section using a label
Out[51]:
A   -2.258121B    2.456196C    0.778567D   -2.030407Name: 2017-01-12 00:00:00, dtype: float64
In [52]:
df.loc[:,["A","B"]]
Out[52]:
 AB2017-01-12-2.2581212.4561962017-01-13-0.6583480.6224952017-01-140.5892191.3927922017-01-15-0.151958-0.6552492017-01-16-1.3233043.1436592017-01-17-0.0249350.385811
In [53]:
df.loc['20170112':'20170116',['A','B']] #Showing label slicing, both endpoints are included
Out[53]:
 AB2017-01-12-2.2581212.4561962017-01-13-0.6583480.6224952017-01-140.5892191.3927922017-01-15-0.151958-0.6552492017-01-16-1.3233043.143659
In [54]:
df.loc["20170115",["A","B"]] 
Out[54]:
A   -0.151958B   -0.655249Name: 2017-01-15 00:00:00, dtype: float64
In [55]:
df.loc[dates[3],"D"] #For getting a scalar value
Out[55]:
-0.6698394854437093
In [56]:
df.at[dates[3],"D"] #For getting fast access to a scalar (equiv to the prior method)
Out[56]:
-0.6698394854437093

Selection by Position

In [57]:
df.iloc[3] #Select via the position of the passed integers
Out[57]:
A   -0.151958B   -0.655249C   -2.114725D   -0.669839Name: 2017-01-15 00:00:00, dtype: float64
In [58]:
df.iloc[2:5,0:2] # By integer slices, acting similar to numpy/python
Out[58]:
 AB2017-01-140.5892191.3927922017-01-15-0.151958-0.6552492017-01-16-1.3233043.143659
In [59]:
df.iloc[[1,3,4],[0,2]] #By lists of integer position locations, similar to the numpy/python style
Out[59]:
 AC2017-01-13-0.6583480.3886252017-01-15-0.151958-2.1147252017-01-16-1.3233040.638996
In [60]:
df.iloc[1:3,:]
Out[60]:
 ABCD2017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.231538
In [61]:
df.iloc[:,1:3]
Out[61]:
 BC2017-01-122.4561960.7785672017-01-130.6224950.3886252017-01-141.3927920.6055452017-01-15-0.655249-2.1147252017-01-163.1436590.6389962017-01-170.385811-1.577185
In [62]:
df.iloc[1,1] #For getting a value explicitly
Out[62]:
0.62249451281708756
In [63]:
df.iat[1,1] #For getting fast access to a scalar (equiv to the prior method)
Out[63]:
0.62249451281708756

Boolean Indexing

In [64]:
df[df.A>0] #Using a single column’s values to select data
Out[64]:
 ABCD2017-01-140.5892191.3927920.6055451.231538
In [65]:
df[df>0] #Selecting values from a DataFrame where a boolean condition is met
Out[65]:
 ABCD2017-01-12NaN2.4561960.778567NaN2017-01-13NaN0.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15NaNNaNNaNNaN2017-01-16NaN3.1436590.6389960.8986832017-01-17NaN0.385811NaNNaN
In [66]:
df2
Out[66]:
 ABCDEF01.02013-01-021.03testfoo11.02013-01-021.03trainfoo21.02013-01-021.03testfoo31.02013-01-021.03trainfoo
In [67]:
df
Out[67]:
 ABCD2017-01-12-2.2581212.4561960.778567-2.0304072017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-16-1.3233043.1436590.6389960.8986832017-01-17-0.0249350.385811-1.577185-0.021460
In [68]:
df2 = df.copy()
df2
Out[68]:
 ABCD2017-01-12-2.2581212.4561960.778567-2.0304072017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-16-1.3233043.1436590.6389960.8986832017-01-17-0.0249350.385811-1.577185-0.021460
In [69]:
df.equals(df2)
Out[69]:
True
In [70]:
df==df2
Out[70]:
 ABCD2017-01-12TrueTrueTrueTrue2017-01-13TrueTrueTrueTrue2017-01-14TrueTrueTrueTrue2017-01-15TrueTrueTrueTrue2017-01-16TrueTrueTrueTrue2017-01-17TrueTrueTrueTrue
In [71]:
df is df2
Out[71]:
False
In [72]:
df2["E"] = ["one","one","two","three","four","three"]
df2
Out[72]:
 ABCDE2017-01-12-2.2581212.4561960.778567-2.030407one2017-01-13-0.6583480.6224950.3886250.073587one2017-01-140.5892191.3927920.6055451.231538two2017-01-15-0.151958-0.655249-2.114725-0.669839three2017-01-16-1.3233043.1436590.6389960.898683four2017-01-17-0.0249350.385811-1.577185-0.021460three
In [73]:
df2[df2.E.isin(["two","four"])]
Out[73]:
 ABCDE2017-01-140.5892191.3927920.6055451.231538two2017-01-16-1.3233043.1436590.6389960.898683four
In [74]:
df2[df2["E"].isin(["two","four"])]
Out[74]:
 ABCDE2017-01-140.5892191.3927920.6055451.231538two2017-01-16-1.3233043.1436590.6389960.898683four

Setting

In [75]:
s1 = pd.Series([1,2,3,4,5,6], index=pd.date_range("20171016",periods=6))  # Setting a new column automatically aligns the data by the indexes
s1
Out[75]:
2017-10-16    12017-10-17    22017-10-18    32017-10-19    42017-10-20    52017-10-21    6Freq: D, dtype: int64
In [76]:
df.at[dates[0],"A"]=0 #Setting values by label
In [77]:
df
Out[77]:
 ABCD2017-01-120.0000002.4561960.778567-2.0304072017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-16-1.3233043.1436590.6389960.8986832017-01-17-0.0249350.385811-1.577185-0.021460
In [78]:
df.iat[0,1] = 0
df
Out[78]:
 ABCD2017-01-120.0000000.0000000.778567-2.0304072017-01-13-0.6583480.6224950.3886250.0735872017-01-140.5892191.3927920.6055451.2315382017-01-15-0.151958-0.655249-2.114725-0.6698392017-01-16-1.3233043.1436590.6389960.8986832017-01-17-0.0249350.385811-1.577185-0.021460
In [79]:
df.loc[:,"D"] = np.array([5]*len(df))  # Setting by assigning with a numpy array
df
Out[79]:
 ABCD2017-01-120.0000000.0000000.77856752017-01-13-0.6583480.6224950.38862552017-01-140.5892191.3927920.60554552017-01-15-0.151958-0.655249-2.11472552017-01-16-1.3233043.1436590.63899652017-01-17-0.0249350.385811-1.5771855
In [80]:
df2 = df.copy()
df2
Out[80]:
 ABCD2017-01-120.0000000.0000000.77856752017-01-13-0.6583480.6224950.38862552017-01-140.5892191.3927920.60554552017-01-15-0.151958-0.655249-2.11472552017-01-16-1.3233043.1436590.63899652017-01-17-0.0249350.385811-1.5771855
In [81]:
df2[df2>0] = -df2
df2
Out[81]:
 ABCD2017-01-120.0000000.000000-0.778567-52017-01-13-0.658348-0.622495-0.388625-52017-01-14-0.589219-1.392792-0.605545-52017-01-15-0.151958-0.655249-2.114725-52017-01-16-1.323304-3.143659-0.638996-52017-01-17-0.024935-0.385811-1.577185-5

Missing Data

In [83]:
df
Out[83]:
 ABCD2017-01-120.0000000.0000000.77856752017-01-13-0.6583480.6224950.38862552017-01-140.5892191.3927920.60554552017-01-15-0.151958-0.655249-2.11472552017-01-16-1.3233043.1436590.63899652017-01-17-0.0249350.385811-1.5771855
In [84]:
df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ['E'])
df1.loc[dates[0]:dates[1],'E'] = 1
df1
Out[84]:
 ABCDE2017-01-120.0000000.0000000.77856751.02017-01-13-0.6583480.6224950.38862551.02017-01-140.5892191.3927920.6055455NaN2017-01-15-0.151958-0.655249-2.1147255NaN
In [85]:
df1.dropna(how="any") #To drop any rows that have missing data
Out[85]:
 ABCDE2017-01-120.0000000.0000000.77856751.02017-01-13-0.6583480.6224950.38862551.0
In [86]:
df1.fillna(value=5)  # Filling missing data
Out[86]:
 ABCDE2017-01-120.0000000.0000000.77856751.02017-01-13-0.6583480.6224950.38862551.02017-01-140.5892191.3927920.60554555.02017-01-15-0.151958-0.655249-2.11472555.0
In [87]:
df1
Out[87]:
 ABCDE2017-01-120.0000000.0000000.77856751.02017-01-13-0.6583480.6224950.38862551.02017-01-140.5892191.3927920.6055455NaN2017-01-15-0.151958-0.655249-2.1147255NaN
In [88]:
pd.isnull(df1)
Out[88]:
 ABCDE2017-01-12FalseFalseFalseFalseFalse2017-01-13FalseFalseFalseFalseFalse2017-01-14FalseFalseFalseFalseTrue2017-01-15FalseFalseFalseFalseTrue
In [89]:
df1.isnull()
Out[89]:
 ABCDE2017-01-12FalseFalseFalseFalseFalse2017-01-13FalseFalseFalseFalseFalse2017-01-14FalseFalseFalseFalseTrue2017-01-15FalseFalseFalseFalseTrue
In [90]:
df1.isna()  #没有这个方法~~
---------------------------------------------------------------------------AttributeError                            Traceback (most recent call last)<ipython-input-90-9dd6d031e095> in <module>()----> 1 df1.isna()  #没有这个方法~~D:\Users\asus\Anaconda3\lib\site-packages\pandas\core\generic.py in __getattr__(self, name)   2968             if name in self._info_axis:   2969                 return self[name]-> 2970             return object.__getattribute__(self, name)   2971    2972     def __setattr__(self, name, value):AttributeError: 'DataFrame' object has no attribute 'isna'

Options

Stats

Operations in general exclude missing data. Performing a descriptive statistic

In [91]:
df
Out[91]:
 ABCD2017-01-120.0000000.0000000.77856752017-01-13-0.6583480.6224950.38862552017-01-140.5892191.3927920.60554552017-01-15-0.151958-0.655249-2.11472552017-01-16-1.3233043.1436590.63899652017-01-17-0.0249350.385811-1.5771855
In [92]:
df.mean()
Out[92]:
A   -0.261554B    0.814918C   -0.213363D    5.000000dtype: float64
In [93]:
df.mean(1)  #Same operation on the other axis
Out[93]:
2017-01-12    1.4446422017-01-13    1.3381932017-01-14    1.8968892017-01-15    0.5195172017-01-16    1.8648382017-01-17    0.945923Freq: D, dtype: float64
In [94]:
s = pd.Series([1,2,3,np.nan,4,5], index=dates).shift(2)  # Operating with objects that have different dimensionality and need alignment. In addition, pandas automatically broadcasts along the specified dimension.
s
Out[94]:
2017-01-12    NaN2017-01-13    NaN2017-01-14    1.02017-01-15    2.02017-01-16    3.02017-01-17    NaNFreq: D, dtype: float64
In [95]:
df
Out[95]:
 ABCD2017-01-120.0000000.0000000.77856752017-01-13-0.6583480.6224950.38862552017-01-140.5892191.3927920.60554552017-01-15-0.151958-0.655249-2.11472552017-01-16-1.3233043.1436590.63899652017-01-17-0.0249350.385811-1.5771855
In [96]:
df.sub(s,axis="index")  #dataFrame与series的减法
Out[96]:
 ABCD2017-01-12NaNNaNNaNNaN2017-01-13NaNNaNNaNNaN2017-01-14-0.4107810.392792-0.3944554.02017-01-15-2.151958-2.655249-4.1147253.02017-01-16-4.3233040.143659-2.3610042.02017-01-17NaNNaNNaNNaN

Apply

In [97]:
df
Out[97]:
 ABCD2017-01-120.0000000.0000000.77856752017-01-13-0.6583480.6224950.38862552017-01-140.5892191.3927920.60554552017-01-15-0.151958-0.655249-2.11472552017-01-16-1.3233043.1436590.63899652017-01-17-0.0249350.385811-1.5771855
In [98]:
df.apply(np.cumsum)  #行叠加。
Out[98]:
 ABCD2017-01-120.0000000.0000000.77856752017-01-13-0.6583480.6224951.167192102017-01-14-0.0691292.0152861.772737152017-01-15-0.2210871.360038-0.341988202017-01-16-1.5443924.5036970.297008252017-01-17-1.5693264.889508-1.28017730
In [99]:
df.apply(lambda x: x.max()-x.min())
Out[99]:
A    1.912523B    3.798908C    2.893293D    0.000000dtype: float64

Histogramming

In [100]:
s = pd.Series(np.random.randint(0,7,size=10))
s
Out[100]:
0    41    52    23    04    55    36    47    38    39    0dtype: int32
In [101]:
s.value_counts()
Out[101]:
3    35    24    20    22    1dtype: int64

String Methods

Series is equipped with a set of string processing methods in the str attribute that make it easy to operate on each element of the array, as in the code snippet below. Note that pattern-matching in str generally uses regular expressions by default (and in some cases always uses them). See more at Vectorized String Methods.

In [102]:
s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
s.str.lower()
Out[102]:
0       a1       b2       c3    aaba4    baca5     NaN6    caba7     dog8     catdtype: object
In [103]:
s
Out[103]:
0       A1       B2       C3    Aaba4    Baca5     NaN6    CABA7     dog8     catdtype: object

Merge 合并

Concat

pandas provides various facilities for easily combining together Series, DataFrame, and Panel objects with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations. See the Merging section Concatenating pandas objects together with concat():

In [104]:
df
Out[104]:
 ABCD2017-01-120.0000000.0000000.77856752017-01-13-0.6583480.6224950.38862552017-01-140.5892191.3927920.60554552017-01-15-0.151958-0.655249-2.11472552017-01-16-1.3233043.1436590.63899652017-01-17-0.0249350.385811-1.5771855
In [105]:
df = pd.DataFrame(np.random.randn(10,4))
df
Out[105]:
 012300.111766-0.5051252.1560290.41915211.0688701.1805870.3613451.09055420.4889970.281507-0.738345-0.2429743-1.8467091.686173-0.202319-1.15198340.573012-1.9791891.5447681.5945955-0.954571-0.6967880.270959-2.2967206-1.5119461.7961130.3994930.41266470.089844-0.545153-0.315653-0.2358288-0.7471401.222900-1.6508120.29243290.6598550.5012650.3639781.722914
In [106]:
# break it into pieces
pieces = [df[:3], df[3:7], df[7:]]
pd.concat(pieces)
Out[106]:
 012300.111766-0.5051252.1560290.41915211.0688701.1805870.3613451.09055420.4889970.281507-0.738345-0.2429743-1.8467091.686173-0.202319-1.15198340.573012-1.9791891.5447681.5945955-0.954571-0.6967880.270959-2.2967206-1.5119461.7961130.3994930.41266470.089844-0.545153-0.315653-0.2358288-0.7471401.222900-1.6508120.29243290.6598550.5012650.3639781.722914
In [107]:
pieces
Out[107]:
[          0         1         2         3 0  0.111766 -0.505125  2.156029  0.419152 1  1.068870  1.180587  0.361345  1.090554 2  0.488997  0.281507 -0.738345 -0.242974,           0         1         2         3 3 -1.846709  1.686173 -0.202319 -1.151983 4  0.573012 -1.979189  1.544768  1.594595 5 -0.954571 -0.696788  0.270959 -2.296720 6 -1.511946  1.796113  0.399493  0.412664,           0         1         2         3 7  0.089844 -0.545153 -0.315653 -0.235828 8 -0.747140  1.222900 -1.650812  0.292432 9  0.659855  0.501265  0.363978  1.722914]

Join

SQL style merges. See the Database style joining

In [108]:
left = pd.DataFrame({"key": ["foo","foo"], "lval": [1,2]})
right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
In [109]:
left
Out[109]:
 keylval0foo11foo2
In [110]:
right
Out[110]:
 keyrval0foo41foo5
In [111]:
pd.merge(left,right,on="key")
Out[111]:
 keylvalrval0foo141foo152foo243foo25
In [112]:
left = pd.DataFrame({'key': ['foo', 'bar'], 'lval': [1, 2]})
right = pd.DataFrame({'key': ['foo', 'bar'], 'rval': [4, 5]})
In [113]:
left
Out[113]:
 keylval0foo11bar2
In [114]:
right
Out[114]:
 keyrval0foo41bar5
In [115]:
pd.merge(left,right,on="key")
Out[115]:
 keylvalrval0foo141bar25

Append

In [116]:
df = pd.DataFrame(np.random.randn(8, 4), columns=['A','B','C','D'])
df
Out[116]:
 ABCD0-0.8524511.074357-0.5918920.9509821-0.9775801.6563740.6936570.71883220.303269-0.881728-1.5093211.21984930.6557511.2356601.7290381.07494840.658413-1.215348-1.1396230.75377251.3451151.420212-0.124543-0.09926561.1296230.597484-0.804759-0.5682667-0.7705700.540917-0.261607-0.083751
In [117]:
s = df.iloc[3]
s
Out[117]:
A    0.655751B    1.235660C    1.729038D    1.074948Name: 3, dtype: float64
In [118]:
df.append(s,ignore_index=True)
Out[118]:
 ABCD0-0.8524511.074357-0.5918920.9509821-0.9775801.6563740.6936570.71883220.303269-0.881728-1.5093211.21984930.6557511.2356601.7290381.07494840.658413-1.215348-1.1396230.75377251.3451151.420212-0.124543-0.09926561.1296230.597484-0.804759-0.5682667-0.7705700.540917-0.261607-0.08375180.6557511.2356601.7290381.074948

Grouping

By “group by” we are referring to a process involving one or more of the following steps • Splitting the data into groups based on some criteria • Applying a function to each group independently • Combining the results into a data structure

In [119]:
df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'foo', 'foo'],
                   'B' : ['one', 'one', 'two', 'three',
                          'two', 'two', 'one', 'three'],
                   'C' : np.random.randn(8),
                   'D' : np.random.randn(8)})
df
Out[119]:
 ABCD0fooone-0.523738-1.3635191barone-0.071920-2.6180272footwo-2.712421-0.4073723barthree-0.635898-1.9428544footwo0.952073-0.5461105bartwo1.474296-0.9822386fooone-0.529788-0.2133977foothree0.877394-0.791663
In [120]:
df.groupby("A").sum()
Out[120]:
 CDA  bar0.766479-5.543120foo-1.936480-3.322062
In [121]:
df.groupby(["A","B"]).sum()  #Grouping by multiple columns forms a hierarchical index, which we then apply the function.
Out[121]:
  CDAB  barone-0.071920-2.618027three-0.635898-1.942854two1.474296-0.982238fooone-1.053527-1.576917three0.877394-0.791663two-1.760347-0.953482

Reshaping

Stack

In [122]:
tuples = list(zip([['bar', 'bar', 'baz', 'baz',
                    'foo', 'foo', 'qux', 'qux'],
                   ['one', 'two', 'one', 'two',
                    'one', 'two', 'one', 'two']]))
tuples
Out[122]:
[(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],), (['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'],)]
In [123]:
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz',
                     'foo', 'foo', 'qux', 'qux'],
                    ['one', 'two', 'one', 'two',
                     'one', 'two', 'one', 'two']]))
tuples
Out[123]:
[('bar', 'one'), ('bar', 'two'), ('baz', 'one'), ('baz', 'two'), ('foo', 'one'), ('foo', 'two'), ('qux', 'one'), ('qux', 'two')]
In [124]:
index = pd.MultiIndex.from_tuples(tuples, names=["first","second"])
index
Out[124]:
MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'], ['one', 'two']],           labels=[[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]],           names=['first', 'second'])
In [125]:
df = pd.DataFrame(np.random.randn(8,2), index=index, columns=['A', 'B'])
df
Out[125]:
  ABfirstsecond  barone-1.101051-1.126231two-0.395652-0.313567bazone1.378579-1.637869two0.665960-0.259749fooone-0.2561811.260131two-0.9947200.506272quxone-0.4226490.191402two-0.1020850.975210
In [126]:
df2 = df[:4]
df2
Out[126]:
  ABfirstsecond  barone-1.101051-1.126231two-0.395652-0.313567bazone1.378579-1.637869two0.665960-0.259749
In [127]:
stacked = df2.stack()
stacked
Out[127]:
first  second   bar    one     A   -1.101051               B   -1.126231       two     A   -0.395652               B   -0.313567baz    one     A    1.378579               B   -1.637869       two     A    0.665960               B   -0.259749dtype: float64

With a “stacked” DataFrame or Series (having a MultiIndex as the index), the inverse operation of stack() is unstack(), which by default unstacks the last level:

In [128]:
stacked.unstack()
Out[128]:
  ABfirstsecond  barone-1.101051-1.126231two-0.395652-0.313567bazone1.378579-1.637869two0.665960-0.259749
In [129]:
stacked.unstack(1)
Out[129]:
 secondonetwofirst   barA-1.101051-0.395652B-1.126231-0.313567bazA1.3785790.665960B-1.637869-0.259749
In [130]:
stacked.unstack(0)
Out[130]:
 firstbarbazsecond   oneA-1.1010511.378579B-1.126231-1.637869twoA-0.3956520.665960B-0.313567-0.259749

Pivot Tables

In [131]:
df = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
                   'B' : ['A', 'B', 'C'] * 4,
                   'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2,
                   'D' : np.random.randn(12),
                   'E' : np.random.randn(12)})
df
Out[131]:
 ABCDE0oneAfoo0.0392300.1342611oneBfoo0.952890-0.4991832twoCfoo-0.778814-0.6557353threeAbar0.7988640.0251094oneBbar-0.580050-1.7116725oneCbar0.004300-0.4335916twoAfoo0.229248-2.6488147threeBfoo0.5064880.6303738oneCfoo-0.3156670.0317649oneAbar-1.5474100.74382510twoBbar-0.4809580.36525511threeCbar1.7429480.692884
In [4]:
pd.pivot_table(df,values="D",index=["A","B"],columns=["C"])
Out[4]:
 CbarfooAB  oneA0.932814-1.440079B0.0602521.071877C2.8797790.355274threeA-0.328442NaNBNaN-2.544812C-1.879058NaNtwoANaN-1.987377B0.220517NaNCNaN-0.082820

Time Series

pandas has simple, powerful, and efficient functionality for performing resampling operations during frequency conversion (e.g., converting secondly data into 5-minutely data). This is extremely common in, but not limited to, financial applications.

In [132]:
rng = pd.date_range("1/2/2017", periods=100, freq="S")
rng
Out[132]:
DatetimeIndex(['2017-01-02 00:00:00', '2017-01-02 00:00:01',               '2017-01-02 00:00:02', '2017-01-02 00:00:03',               '2017-01-02 00:00:04', '2017-01-02 00:00:05',               '2017-01-02 00:00:06', '2017-01-02 00:00:07',               '2017-01-02 00:00:08', '2017-01-02 00:00:09',               '2017-01-02 00:00:10', '2017-01-02 00:00:11',               '2017-01-02 00:00:12', '2017-01-02 00:00:13',               '2017-01-02 00:00:14', '2017-01-02 00:00:15',               '2017-01-02 00:00:16', '2017-01-02 00:00:17',               '2017-01-02 00:00:18', '2017-01-02 00:00:19',               '2017-01-02 00:00:20', '2017-01-02 00:00:21',               '2017-01-02 00:00:22', '2017-01-02 00:00:23',               '2017-01-02 00:00:24', '2017-01-02 00:00:25',               '2017-01-02 00:00:26', '2017-01-02 00:00:27',               '2017-01-02 00:00:28', '2017-01-02 00:00:29',               '2017-01-02 00:00:30', '2017-01-02 00:00:31',               '2017-01-02 00:00:32', '2017-01-02 00:00:33',               '2017-01-02 00:00:34', '2017-01-02 00:00:35',               '2017-01-02 00:00:36', '2017-01-02 00:00:37',               '2017-01-02 00:00:38', '2017-01-02 00:00:39',               '2017-01-02 00:00:40', '2017-01-02 00:00:41',               '2017-01-02 00:00:42', '2017-01-02 00:00:43',               '2017-01-02 00:00:44', '2017-01-02 00:00:45',               '2017-01-02 00:00:46', '2017-01-02 00:00:47',               '2017-01-02 00:00:48', '2017-01-02 00:00:49',               '2017-01-02 00:00:50', '2017-01-02 00:00:51',               '2017-01-02 00:00:52', '2017-01-02 00:00:53',               '2017-01-02 00:00:54', '2017-01-02 00:00:55',               '2017-01-02 00:00:56', '2017-01-02 00:00:57',               '2017-01-02 00:00:58', '2017-01-02 00:00:59',               '2017-01-02 00:01:00', '2017-01-02 00:01:01',               '2017-01-02 00:01:02', '2017-01-02 00:01:03',               '2017-01-02 00:01:04', '2017-01-02 00:01:05',               
'2017-01-02 00:01:06', '2017-01-02 00:01:07',               '2017-01-02 00:01:08', '2017-01-02 00:01:09',               '2017-01-02 00:01:10', '2017-01-02 00:01:11',               '2017-01-02 00:01:12', '2017-01-02 00:01:13',               '2017-01-02 00:01:14', '2017-01-02 00:01:15',               '2017-01-02 00:01:16', '2017-01-02 00:01:17',               '2017-01-02 00:01:18', '2017-01-02 00:01:19',               '2017-01-02 00:01:20', '2017-01-02 00:01:21',               '2017-01-02 00:01:22', '2017-01-02 00:01:23',               '2017-01-02 00:01:24', '2017-01-02 00:01:25',               '2017-01-02 00:01:26', '2017-01-02 00:01:27',               '2017-01-02 00:01:28', '2017-01-02 00:01:29',               '2017-01-02 00:01:30', '2017-01-02 00:01:31',               '2017-01-02 00:01:32', '2017-01-02 00:01:33',               '2017-01-02 00:01:34', '2017-01-02 00:01:35',               '2017-01-02 00:01:36', '2017-01-02 00:01:37',               '2017-01-02 00:01:38', '2017-01-02 00:01:39'],              dtype='datetime64[ns]', freq='S')
In [133]:
ts = pd.Series(np.random.randint(0,500,len(rng)), index=rng)
ts
Out[133]:
2017-01-02 00:00:00    2512017-01-02 00:00:01     632017-01-02 00:00:02    1082017-01-02 00:00:03    2882017-01-02 00:00:04    4912017-01-02 00:00:05    4902017-01-02 00:00:06    3432017-01-02 00:00:07    3572017-01-02 00:00:08     722017-01-02 00:00:09    1712017-01-02 00:00:10    3242017-01-02 00:00:11    2812017-01-02 00:00:12    1762017-01-02 00:00:13     142017-01-02 00:00:14    4952017-01-02 00:00:15    1502017-01-02 00:00:16     692017-01-02 00:00:17    1442017-01-02 00:00:18    1262017-01-02 00:00:19    3682017-01-02 00:00:20    1292017-01-02 00:00:21    3862017-01-02 00:00:22    2282017-01-02 00:00:23    4582017-01-02 00:00:24     982017-01-02 00:00:25    2442017-01-02 00:00:26    2062017-01-02 00:00:27     982017-01-02 00:00:28     922017-01-02 00:00:29    259                      ... 2017-01-02 00:01:10    1272017-01-02 00:01:11    3422017-01-02 00:01:12    1852017-01-02 00:01:13    1232017-01-02 00:01:14     732017-01-02 00:01:15    1322017-01-02 00:01:16    4622017-01-02 00:01:17    3172017-01-02 00:01:18    1802017-01-02 00:01:19    2472017-01-02 00:01:20     972017-01-02 00:01:21    4012017-01-02 00:01:22    3422017-01-02 00:01:23    3822017-01-02 00:01:24    3042017-01-02 00:01:25     472017-01-02 00:01:26    1932017-01-02 00:01:27    3342017-01-02 00:01:28    1962017-01-02 00:01:29    2972017-01-02 00:01:30    1952017-01-02 00:01:31    2362017-01-02 00:01:32    2002017-01-02 00:01:33    4902017-01-02 00:01:34    1962017-01-02 00:01:35    2012017-01-02 00:01:36    3972017-01-02 00:01:37    4942017-01-02 00:01:38    4822017-01-02 00:01:39    267Freq: S, Length: 100, dtype: int32
In [7]:
ts.resample("5Min").sum()
Out[7]:
2017-01-02    22939Freq: 5T, dtype: int32
In [9]:
ts.resample("1Min").sum()
Out[9]:
2017-01-02 00:00:00    138962017-01-02 00:01:00     9043Freq: T, dtype: int32

Time zone representation.零时区 UTC表示。

In [10]:
rng = pd.date_range("2/1/2017 00:00", periods=5, freq="D")
rng
Out[10]:
DatetimeIndex(['2017-02-01', '2017-02-02', '2017-02-03', '2017-02-04',               '2017-02-05'],              dtype='datetime64[ns]', freq='D')
In [12]:
ts = pd.Series(np.random.randn(len(rng)), index=rng)
ts
Out[12]:
2017-02-01    0.3295942017-02-02    2.0973192017-02-03    1.8520232017-02-04   -0.2134522017-02-05    0.160873Freq: D, dtype: float64
In [13]:
tsUtc = ts.tz_localize("UTC")
tsUtc
Out[13]:
2017-02-01 00:00:00+00:00    0.3295942017-02-02 00:00:00+00:00    2.0973192017-02-03 00:00:00+00:00    1.8520232017-02-04 00:00:00+00:00   -0.2134522017-02-05 00:00:00+00:00    0.160873Freq: D, dtype: float64

Convert to another time zone.时区转换。

In [14]:
tsUtc.tz_convert("US/Eastern")
Out[14]:
2017-01-31 19:00:00-05:00    0.3295942017-02-01 19:00:00-05:00    2.0973192017-02-02 19:00:00-05:00    1.8520232017-02-03 19:00:00-05:00   -0.2134522017-02-04 19:00:00-05:00    0.160873Freq: D, dtype: float64
In [15]:
tsUtc
Out[15]:
2017-02-01 00:00:00+00:00    0.3295942017-02-02 00:00:00+00:00    2.0973192017-02-03 00:00:00+00:00    1.8520232017-02-04 00:00:00+00:00   -0.2134522017-02-05 00:00:00+00:00    0.160873Freq: D, dtype: float64

Converting between time span representations

In [16]:
rng=pd.date_range("1/8/2017",periods=5,freq="M")rng
Out[16]:
DatetimeIndex(['2017-01-31', '2017-02-28', '2017-03-31', '2017-04-30',               '2017-05-31'],              dtype='datetime64[ns]', freq='M')
In [18]:
ts=pd.Series(np.random.randn(len(rng)),rng)ts
Out[18]:
2017-01-31    0.9045232017-02-28   -0.4701442017-03-31   -0.3732442017-04-30    0.8604482017-05-31    0.176226Freq: M, dtype: float64
In [20]:
ps=ts.to_period()ps
Out[20]:
2017-01    0.9045232017-02   -0.4701442017-03   -0.3732442017-04    0.8604482017-05    0.176226Freq: M, dtype: float64
In [21]:
ps.to_timestamp()
Out[21]:
2017-01-01    0.9045232017-02-01   -0.4701442017-03-01   -0.3732442017-04-01    0.8604482017-05-01    0.176226Freq: MS, dtype: float64
In [22]:
ps
Out[22]:
2017-01    0.9045232017-02   -0.4701442017-03   -0.3732442017-04    0.8604482017-05    0.176226Freq: M, dtype: float64

Converting between period and timestamp enables some convenient arithmetic functions to be used. In the following example, we convert a quarterly frequency with year ending in November to 9am of the end of the month following the quarter end:

In [23]:
prng=pd.period_range("1990Q1","2017Q4",freq="Q-NOV")prng
Out[23]:
PeriodIndex(['1990Q1', '1990Q2', '1990Q3', '1990Q4', '1991Q1', '1991Q2',             '1991Q3', '1991Q4', '1992Q1', '1992Q2',             ...             '2015Q3', '2015Q4', '2016Q1', '2016Q2', '2016Q3', '2016Q4',             '2017Q1', '2017Q2', '2017Q3', '2017Q4'],            dtype='period[Q-NOV]', length=112, freq='Q-NOV')
In [25]:
ts= pd.Series(np.random.randn(len(prng)),prng)ts.head()
Out[25]:
1990Q1    1.1930311990Q2    0.6216271990Q3   -0.2355531990Q4    0.6429381991Q1    0.247024Freq: Q-NOV, dtype: float64
In [26]:
ts.index=(prng.asfreq("M","e")+1).asfreq("H","s")+9ts.head()
Out[26]:
1990-03-01 09:00    1.1930311990-06-01 09:00    0.6216271990-09-01 09:00   -0.2355531990-12-01 09:00    0.6429381991-03-01 09:00    0.247024Freq: H, dtype: float64

Categoricals

In [34]:
df = pd.DataFrame({"id":[1,2,3,4,5,6],"raw_grade":["a","a","c","b","b","f"]})df
Out[34]:
 idraw_grade01a12a23c34b45b56f

Convert the raw grades to a categorical data type.

In [35]:
df["grade"]=df.raw_grade.astype("category")df
Out[35]:
 idraw_gradegrade01aa12aa23cc34bb45bb56ff
In [36]:
df.grade #Convert the raw grades to a categorical data type
Out[36]:
0    a1    a2    c3    b4    b5    fName: grade, dtype: categoryCategories (4, object): [a, b, c, f]
In [37]:
# Rename the categories to more meaningful names (assigning to Series.cat.categories is inplace!)df.grade.cat.categories=["very good","good","nomal","bad"]df
Out[37]:
 idraw_gradegrade01avery good12avery good23cnomal34bgood45bgood56fbad
In [38]:
# Reorder the categories and simultaneously add the missing categories (methods under Series .cat return a new Series per default).df.grade=df.grade.cat.set_categories(["very bad", "bad", "medium","good", "very good"])df.grade
Out[38]:
0    very good1    very good2          NaN3         good4         good5          badName: grade, dtype: categoryCategories (5, object): [very bad, bad, medium, good, very good]
In [39]:
df
Out[39]:
 idraw_gradegrade01avery good12avery good23cNaN34bgood45bgood56fbad

Sorting is by the order defined in the categories, not lexical order.

In [40]:
df.sort_values(by="grade")
Out[40]:
 idraw_gradegrade23cNaN56fbad34bgood45bgood01avery good12avery good

Grouping by a categorical column also shows empty categories.

In [41]:
df.groupby("grade").size()
Out[41]:
gradevery bad     0bad          1medium       0good         2very good    2dtype: int64

Plotting

In [43]:
ts=pd.Series(np.random.randn(1000),index=pd.date_range("1/1/2017",periods=1000))ts.head()
Out[43]:
2017-01-01   -0.7450672017-01-02   -0.0708952017-01-03    0.2335422017-01-04   -0.2065972017-01-05    0.891064Freq: D, dtype: float64
In [45]:
ts=ts.cumsum()ts.head()
Out[45]:
2017-01-01   -0.7450672017-01-02   -1.5610292017-01-03   -2.1434492017-01-04   -2.9324662017-01-05   -2.830418Freq: D, dtype: float64
In [48]:
ts.plot()
Out[48]:
<matplotlib.axes._subplots.AxesSubplot at 0x19bf6a6e278>
In [50]:
df=pd.DataFrame(np.random.randn(1000,4),index=ts.index,columns=["A","B","C","D"])df.head()
Out[50]:
 ABCD2017-01-01-1.940139-0.476590-0.1540661.6928122017-01-020.3998910.2689760.596209-0.4849792017-01-030.814519-0.142193-0.084394-0.6873422017-01-040.385848-1.230059-0.093327-0.0966522017-01-050.407435-0.8493470.3791920.172933
In [51]:
df=df.cumsum()
In [53]:
plt.figure()df.plot()plt.legend(loc="best")plt.show()
<matplotlib.figure.Figure at 0x19bf8855da0>
<matplotlib.figure.Figure at 0x19bf897dc88>

Getting Data In/Out

CSV

In [ ]:
df.to_csv("foo.csv")
In [ ]:
pd.read_csv("foo.csv")

HDF5

In [ ]:
df.to_hdf("foo.h5","df")
In [ ]:
pd.read_hdf("foo.h5","df")

Excel

In [ ]:
df.to_excel('foo.xlsx', sheet_name='Sheet1')
In [ ]:
pd.read_excel('foo.xlsx', 'Sheet1', index_col=None, na_values=['NA'])
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
原创粉丝点击