2015-03-19 Data Wrangling (3): String Manipulation

-- String Manipulation
---------------------------------------------------------------------------------------
-----(1) String object methods
1. split
val = 'a,b, guido'
val.split(',')                    # ['a', 'b', ' guido']
2. strip
pieces = [x.strip() for x in val.split(',')]
pieces                            # ['a', 'b', 'guido']
3. join
'::'.join(pieces)                 # 'a::b::guido'
4. Locating substrings
'guido' in val                    # True
val.index(',')                    # 1
val.find(':')                     # -1
5. Other methods
val.count(',')                    # 2
val.replace(',', '::')            # 'a::b:: guido'

val.replace(',', '')              # 'ab guido'
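The practical difference between index and find (a quick sketch, using the same val as above): index raises an exception when the substring is absent, while find simply returns -1.
try:
    val.index(':')
except ValueError:
    print('":" not found')        # index raises ValueError; find would have returned -1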


---------------------------------------------------------------------------------------
-----(2) Regular expressions
import re
text="foo   bar\t baz   \tqux"
re.split('\s+',text)


# Compiling the pattern gives a reusable regex object
regex = re.compile(r'\s+')
regex.split(text)
# Get all substrings that match the regex
regex.findall(text)
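To make the split/findall distinction concrete, this quick sketch shows what the two calls return on the sample text above (results as comments):
regex.split(text)                 # ['foo', 'bar', 'baz', 'qux'] -- the pieces between matches
regex.findall(text)               # ['   ', '\t ', '   \t']      -- the matched whitespace runs themselves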


text="""Dave dave@google.com
Steve steve@gmail.com
Rob rob@gmail.com
Ryan ryan@yahoo.com
"""
pattern=r'[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}'
regex=re.compile(pattern,flags=re.IGNORECASE)
regex.findall(text)               # all addresses found in text
m = regex.search(text)            # search returns a match object for the first match only
text[m.start():m.end()]           # 'dave@google.com'
print(regex.match(text))          # None: match only succeeds at the very start of the string


print(regex.sub('REDACTED', text))    # every matched address is replaced with 'REDACTED'


pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})'
regex = re.compile(pattern, flags=re.IGNORECASE)
m = regex.match('wesm@bright.net')
m.groups()                        # ('wesm', 'bright', 'net')
regex.findall(text)               # with groups in the pattern, findall returns a list of tuples


print(regex.sub(r'Username:\1,Domain:\2,Suffix:\3', text))    # \1, \2, \3 refer to the captured groups


# Named groups; re.VERBOSE ignores whitespace in the pattern so it can span multiple lines
regex=re.compile(r"""
(?P<username>[A-Z0-9._%+-]+)
@
(?P<domain>[A-Z0-9.-]+)
\.
(?P<suffix>[A-Z]{2,4})""",flags=re.IGNORECASE|re.VERBOSE)
m=regex.match('wesm@bright.net')
m.groupdict()                     # {'username': 'wesm', 'domain': 'bright', 'suffix': 'net'}
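To pull every address out of text with the named groups (not in the original notes; a minimal sketch using re.finditer):
[m.groupdict() for m in regex.finditer(text)]
# [{'username': 'dave', 'domain': 'google', 'suffix': 'com'}, ...]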
---------------------------------------------------------------------------------------
-----(3) Vectorized string functions in pandas
import numpy as np
import pandas as pd
from pandas import Series, DataFrame

data = {'Dave': 'dave@google.com', 'Steve': 'steve@gmail.com', 'Rob': 'rob@gmail.com', 'Wes': np.nan}
data = Series(data)
data
data.isnull()
data.str.contains('gmail')        # vectorized; NaN entries propagate as NaN
data.str.findall(pattern, flags=re.IGNORECASE)


matches = data.str.match(pattern, flags=re.IGNORECASE)
matches
matches.str.get(1)                # element-wise indexing into the matched groups
matches.str[0]

data.str[:5]
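Note: in newer pandas releases str.match returns booleans rather than the matched groups; str.extract is the usual way to get groups into columns. A minimal sketch, reusing the grouped pattern defined above:
data.str.extract(pattern, flags=re.IGNORECASE)
# one column per capture group; rows with missing values stay NaN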


---------------------------------------------------------------------------------------
-----(4) Example: USDA food database
import json
db=json.load(open(r'ch07\foods-2011-10-03.json'))
len(db)
db[0].keys()
db[0]['nutrients'][0]
nutrients=DataFrame(db[0]['nutrients'])
nutrients[:7]


info_keys=['description','group','id','manufacturer']
info=DataFrame(db,columns=info_keys)
info[:5]
info
pd.value_counts(info.group)[:10]


nutrients = []
for rec in db:
    fnuts = DataFrame(rec['nutrients'])   # nutrient table for one food
    fnuts['id'] = rec['id']               # tag each row with the food's id
    nutrients.append(fnuts)
nutrients = pd.concat(nutrients, ignore_index=True)


nutrients.duplicated().sum()
nutrients=nutrients.drop_duplicates()
col_mapping={'description':'food','group':'fgroup'}
info=info.rename(columns=col_mapping,copy=False)


col_mapping={'description':'nutrient','group':'nutgroup'}
nutrients=nutrients.rename(columns=col_mapping,copy=False)


ndata=pd.merge(nutrients,info,on='id',how='outer')
ndata
ndata.iloc[30000]


result = ndata.groupby(['nutrient', 'fgroup'])['value'].quantile(0.5)    # median value of each nutrient per food group
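To look at a single nutrient across food groups (a sketch; 'Zinc, Zn' is assumed to be one of the nutrient names in this dataset):
result['Zinc, Zn'].sort_values()  # median zinc value per food group, smallest to largest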




