1.当csv数量在10以下,每个csv量很小时:
import os

import pandas as pd
def merge_csv_file(path=None, col_name=None, file_type='csv'):
    """Merge every .csv (or .xlsx) file found directly inside a folder.

    :param path: folder that holds the files to merge
    :param col_name: optional list of column names handed to the reader
                     (``names=`` for csv, ``names=`` for excel); ``None``
                     lets pandas infer the header from each file
    :param file_type: 'csv' or 'xlsx'
    :return: one DataFrame with all files stacked row-wise
             (empty DataFrame when nothing matched)
    """
    # NOTE: original used a mutable default ``col_name=[]`` which also made
    # read_csv produce a zero-column frame; None restores normal inference.
    suffix = '.csv' if file_type == 'csv' else '.xlsx'
    reader = pd.read_csv if file_type == 'csv' else pd.read_excel

    frames = []
    # Only the top level of ``path`` is scanned, matching the original's
    # early return out of the first os.walk iteration.
    for filename in os.listdir(path):
        if filename.endswith(suffix):
            # os.path.join works whether or not ``path`` ends with a separator
            # (the original ``path + filename`` broke without one).
            frames.append(reader(os.path.join(path, filename), names=col_name))

    # Concatenate once at the end: repeated pd.concat inside the loop
    # is quadratic in the number of files.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, axis=0)
2.当csv很多,单个csv量也很大,内存不是很大,而且想要快速,可以用追加的方式
import os
import pandas as pd
def append_csv(csvs_path, save_file_path_and_name, save_col):
    """Append many CSV files into one output file, one at a time.

    Memory-friendly: each source file is read, reduced to ``save_col``,
    appended to the output, and released before the next one is read.

    :param csvs_path: folder holding the source .csv files
    :param save_file_path_and_name: path (incl. name) of the merged output file
    :param save_col: list of columns to keep — also fixes the column order
                     so rows from different files line up
    :return: True when the scan finished
    """
    # Write the header only for the very first chunk so the merged file is
    # a valid CSV (the original wrote header=False for every chunk and also
    # leaked each file's pandas index as a meaningless extra column).
    write_header = not os.path.exists(save_file_path_and_name)
    # Only the top level is scanned, matching the original's early return
    # out of the first os.walk iteration.
    for name in os.listdir(csvs_path):
        if name.endswith('.csv'):
            # usecols: let pandas load only the wanted columns — this is
            # the whole point of the low-memory append strategy.
            chunk = pd.read_csv(os.path.join(csvs_path, name), usecols=save_col)
            chunk[save_col].to_csv(save_file_path_and_name, mode='a',
                                   header=write_header, index=False)
            write_header = False
    return True