数据加载、存储与文件格式 利用Python进行数据分析 第6章

xiaoxiao2021-02-27  310

数据加载、存储与文件格式

源码下载链接

读写文本格式的数据

本文对Python中的数据加载、存储与文件格式做了一个简要的说明,实际应用中的情况更加复杂,每个小节的内容都很有限。如果用到相关内容,各位读者还需上网查找补充。

#read_csv和read_table都是从文件、URL、文本型对象中加载带分隔符的数据, # read_csv默认分隔符为逗号,而read_table的默认分隔符为制表符(“\t”) import pandas as pd df=pd.read_csv('ch06/ex1.csv')#a,b,c,d,message是文件中的第一行 df abcdmessage01234hello15678world29101112foo pd.read_table('ch06/ex1.csv',sep=',') abcdmessage01234hello15678world29101112foo pd.read_csv('ch06/ex2.csv',header=None)#ex2.csv中没有标题行,pandas默认分配列名 0123401234hello15678world29101112foo pd.read_csv('ch06/ex2.csv',names=['a','b','c','d','message'])#自定义列名 abcdmessage01234hello15678world29101112foo names=['a','b','c','d','message'] pd.read_csv('ch06/ex2.csv',names=names,index_col='message')#将message做成DataFrame的索引 abcdmessagehello1234world5678foo9101112 #如果希望将多个列做成一个层次化索引,只需传入由列编号或列名组成的列表即可 parsed=pd.read_csv('ch06/csv_mindex.csv',index_col=['key1','key2']) parsed value1value2key1key2onea12b34c56d78twoa910b1112c1314d1516 #有些表格不是用固定的分隔符去分割字段的(比如空白符或其他) #对于这种情况,可以编写一个正则表达式来作为read_table的分隔符 list(open('ch06/ex3.txt')) [' A B C\n', 'aaa -0.264438 -1.026059 -0.619500\n', 'bbb 0.927272 0.302904 -0.032399\n', 'ccc -0.264273 -0.386314 -0.217601\n', 'ddd -0.871858 -0.348382 1.100491\n'] result=pd.read_table('ch06/ex3.txt',sep='\s+')#分隔符用正则表达式 result ABCaaa-0.264438-1.026059-0.619500bbb0.9272720.302904-0.032399ccc-0.264273-0.386314-0.217601ddd-0.871858-0.3483821.100491 pd.read_csv('ch06/ex4.csv',skiprows=[0,2,3])#skiprows跳过文件的第一、三、四行 abcdmessage01234hello15678world29101112foo #pandas处理缺失值,会识别NA、-1.#IND以及NULL等 result=pd.read_csv('ch06/ex5.csv') result somethingabcdmessage0one123.04NaN1two56NaN8world2three91011.012foo pd.isnull(result) somethingabcdmessage0FalseFalseFalseFalseFalseTrue1FalseFalseFalseTrueFalseFalse2FalseFalseFalseFalseFalseFalse #na_values可以接受一组用于表示缺失值的字符串 result=pd.read_csv('ch06/ex5.csv',na_values=['NULL']) result somethingabcdmessage0one123.04NaN1two56NaN8world2three91011.012foo #可以用一个字典为各列指定不同的NA标记值 sentinels={'message':['foo','NA'],'something':['two']} pd.read_csv('ch06/ex5.csv',na_values=sentinels) 
somethingabcdmessage0one123.04NaN1NaN56NaN8world2three91011.012NaN

逐块读取文本文件

result=pd.read_csv('ch06/ex6.csv') result onetwothreefourkey00.467976-0.038649-0.295344-1.824726L1-0.3588931.4044530.704965-0.200638B2-0.5018400.659254-0.421691-0.057688G30.2048861.0741341.388361-0.982404R40.354628-0.1331160.283763-0.837063Q51.8174800.7422730.419395-2.251035Q6-0.7767640.935518-0.332872-1.875641U7-0.9131351.530624-0.5726570.477252K80.358480-0.497572-0.3670160.507702S9-1.740877-1.160417-1.6378302.172201G100.240564-0.3282491.2521551.0727968110.7640181.165476-0.6395441.495258R120.571035-0.3105370.582437-0.2987651132.3176580.430710-1.3342160.199679P141.547771-1.119753-2.2776340.329586J15-1.3106080.401719-1.0009871.156708E16-0.0884960.6347120.1533240.415335B17-0.018663-0.247487-1.4465220.750938A18-0.070127-1.5790970.1208920.671432F19-0.194678-0.4920392.3596050.319810H20-0.2486180.868707-0.492226-0.717959W21-1.091549-0.867110-0.647760-0.832562C220.641404-0.138822-0.621963-0.284839C231.2164080.9926870.165162-0.069619V24-0.5644740.7928320.7470530.571675I251.759879-0.515666-0.2304811.362317S260.1262660.3092810.382820-0.239199L271.334360-0.100152-0.840731-0.643967628-0.7376200.278087-0.053235-0.950972J29-1.148486-0.986292-0.1449630.124362Y………………99700.633495-0.1865240.9276270.143164499710.308636-0.1128570.762842-1.07297719972-1.627051-0.9781510.154745-1.229037Z99730.3148470.0979890.1996080.955193P99741.6669070.9920050.496128-0.686391S99750.0106030.708540-1.2587110.226541K99760.118693-0.714455-0.501342-0.254764K99770.302616-2.011527-0.6280850.768827H9978-0.0985721.769086-0.215027-0.053076A9979-0.0190581.9649940.738538-0.883776F9980-0.5953490.001781-1.423355-1.458477M99811.392170-1.396560-1.425306-0.847535H9982-0.896029-0.1522871.9244830.36518469983-2.274642-0.9018741.5003520.996541N9984-0.3018981.0199061.1021602.624526I9985-2.548389-0.5853741.496201-0.718815D9986-0.0645880.759292-1.568415-0.420933E9987-0.143365-1.111760-1.8155810.43527429988-0.070412-1.0559210.338017-0.440763X99890.6491480.994273-1.3842270.485120Q9990-0.3707690.404356-1.051628-1.05089989991-0.40
99800.155627-0.8189901.277350W99920.301214-1.1112030.6682580.671922A99931.8211170.4164450.1738740.505118X99940.0688041.3227590.8023460.223618H99952.311896-0.417070-1.409599-0.515821L9996-0.479893-0.6504190.745152-0.646038E99970.5233310.7871120.4860661.093156K9998-0.3625590.598894-1.8432010.887292G9999-0.096376-1.012999-0.657431-0.5733150

10000 rows × 5 columns

#如果只想读取几行,通过nrows进行指定即可 pd.read_csv('ch06/ex6.csv',nrows=5) onetwothreefourkey00.467976-0.038649-0.295344-1.824726L1-0.3588931.4044530.704965-0.200638B2-0.5018400.659254-0.421691-0.057688G30.2048861.0741341.388361-0.982404R40.354628-0.1331160.283763-0.837063Q #要逐块读取文件,需要设置chunksize(行数) chunker=pd.read_csv('ch06/ex6.csv',chunksize=1000) chunker #read_csv返回的这个TextParser对象可以根据chunksize对文件进行逐块迭代 from pandas import Series,DataFrame tot=Series([]) for piece in chunker: tot=tot.add(piece['key'].value_counts(),fill_value=0) tot=tot.sort_values(ascending=False) tot[:10] E 368.0 X 364.0 L 346.0 O 343.0 Q 340.0 M 338.0 J 337.0 F 335.0 K 334.0 H 330.0 dtype: float64 #TextParser还有一个get_chunk方法,它可以使你读取任意大小的块

将数据写入到文本格式

data=pd.read_csv('ch06/ex5.csv') data somethingabcdmessage0one123.04NaN1two56NaN8world2three91011.012foo data.to_csv('ch06/myout.csv') data.to_csv('ch06/myout1.csv',sep='|') data.to_csv('ch06/myout2.csv',na_rep='NULL')#空值替换为NULL输出 data.to_csv('ch06/myout3.csv',index=False,header=False)#不输出行和列标签 data.to_csv('ch06/myout4.csv',index=False,columns=['a','b','c'])#写出一部分的列,并以指定顺序排序 #Series也有一个to_csv方法 import numpy as np dates=pd.date_range('1/1/2000',periods=7) ts=Series(np.arange(7),index=dates) ts.to_csv('ch06/tseries.csv') Series.from_csv('ch06/tseries.csv',parse_dates=True) 2000-01-01 0 2000-01-02 1 2000-01-03 2 2000-01-04 3 2000-01-05 4 2000-01-06 5 2000-01-07 6 dtype: int64 ##手工处理分隔符格式 #对于任何单字符分隔符文件,可以直接使用python内置的csv模块 import csv f=open('ch06/ex7.csv') reader=csv.reader(f) #对这个reader进行迭代将会为每行产生一个列表,并移除了所有的引号 for line in reader: print(line) ['a', 'b', 'c'] ['1', '2', '3'] ['1', '2', '3', '4'] #为了使数据格式合乎要求,需要对其做一些整理工作 lines=list(csv.reader(open('ch06/ex7.csv'))) header,values=lines[0],lines[1:] data_dict={h:v for h,v in zip(header,zip(*values))} data_dict {'a': ('1', '1'), 'b': ('2', '2'), 'c': ('3', '3')} #可以使用csv.writer手工输出分隔符文件 with open('ch06/mydata.csv','w') as f: writer=csv.writer(f) writer.writerow(('one','two','three')) writer.writerow(('1','2','3')) writer.writerow(('4','5','6')) writer.writerow(('7','8','9'))

JSON数据

obj=""" {"name":"Wes","places_lived":["United States","Spain","Germany"], "pet":null, "siblings":[{"name":"Scott","age":25,"pet":"Zuko"}, {"name":"Katie","age":33,"pet":"Cisco"}]} """ import json result=json.loads(obj) result {'name': 'Wes', 'pet': None, 'places_lived': ['United States', 'Spain', 'Germany'], 'siblings': [{'age': 25, 'name': 'Scott', 'pet': 'Zuko'}, {'age': 33, 'name': 'Katie', 'pet': 'Cisco'}]} #json.dumps则将Python对象转换成JSON格式 asjson=json.dumps(result) #将一个JSON对象转换为DataFrame siblings=DataFrame(result['siblings'],columns=['name','age']) siblings nameage0Scott251Katie33 siblings.to_json('ch06/json.csv')

XML和HTML:Web信息收集

利用lxml.objectify解析XML

from lxml import objectify path='ch06/mta_perf/Performance_MNR.xml' parsed=objectify.parse(open(path)) root=parsed.getroot() data=[] skip_fields=['PARENT_SEQ','INDICATOR_SEQ','DESIRED_CHANGE','DECIMAL_PLACES'] for elt in root.INDICATOR: el_data={} for child in elt.getchildren(): if child.tag in skip_fields: continue el_data[child.tag]=child.pyval data.append(el_data) perf=DataFrame(data) perf AGENCY_NAMECATEGORYDESCRIPTIONFREQUENCYINDICATOR_NAMEINDICATOR_UNITMONTHLY_ACTUALMONTHLY_TARGETPERIOD_MONTHPERIOD_YEARYTD_ACTUALYTD_TARGET0Metro-North RailroadService IndicatorsPercent of commuter trains that arrive at thei…MOn-Time Performance (West of Hudson)
转载请注明原文地址: https://www.6miu.com/read-5979.html

最新回复(0)