pandas and numpy notes

This post collects a set of Python data-handling methods and tricks, covering basic pandas operations, data filtering, data transformation, and the use of regular expressions. It is aimed at readers who want to process data more efficiently.


axis=1   # operate across the columns, i.e. row by row (e.g. df.apply(..., axis=1))
axis=0   # operate down the rows, i.e. column by column (the default for most methods)
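# Quick sketch of the difference on a tiny made-up DataFrame:
import pandas as pd
df_axis = pd.DataFrame({'a': [1, 2], 'b': [10, 20]})
print(df_axis.sum(axis=0))  # one sum per column: a=3, b=30
print(df_axis.sum(axis=1))  # one sum per row: 11, 22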

# Tuple indexing
aa = (1, 2, 3, 5, 6)
print(aa[0], aa[1], aa[2])

# String slicing
str1 = 'this is uesd to test substring'
print("str1 with the first and last characters removed:", str1[1: -1])  # his is uesd to test substrin

# Extracting the first coordinate from a LINESTRING string
i = "LINESTRING(13294064.7654 4312942.966899798, 13794019.877 4312953.864499999, 13270146.0587 4337944.319439999, 13294064.7654 4312942.966899798)"
print(i.replace('LINESTRING', '')[1: -1].split(' ')[0])

# Regular expressions
import re
regex = re.compile(r"\d+\.\d+")  # match floating-point numbers
i = "MULTISTRING ((13294064.7654 4312942.966899798, 13794019.877 4312953.864499999, 13270146.0587 4337944.319439999,13294064.7654 4312942.966899798))"
aa = re.findall(regex, i)
print(aa)

# Sets do not guarantee element order
differ_id = {20, 50, 60, 80, 45, 78, 82}
gid_list = {5, 60, 20, 25, 50, 40, 80, 70}
difference_id = list(differ_id.difference(gid_list))
print(differ_id)
print(gid_list)
print(difference_id)

# Build a DataFrame from several lists (all lists must have the same length)
import pandas as pd
a = [1, 5, 6, 8, 2, 4, 7]
b = [11, 15, 16, 18, 12, 14, 17]
c = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c})
print(df)
print(df['first'].dtype)    # dtype of a single column
print(df.dtypes)   # dtypes of every column in the DataFrame

# Using apply together with lambda
import numpy as np
import pandas as pd

data = np.arange(0, 16).reshape(4, 4)
data = pd.DataFrame(data, columns=['a', 'b', 'c', 'd'])
print(data)
data['new_a'] = data[['b', 'c']].apply(lambda x: x.b - x.c, axis=1)  # subtract column c from column b, row by row
print(data)

# Select all rows that contain NaN, and all rows that do not
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
nan = np.nan
a = [1, 5, 6, 8, 2, 4, 7]
b = [11, 15, 16, 18, 12, nan, nan]
c = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c})
print(df)
print('-------------')
df1 = df.dropna(subset=['second'])
df1['second'] = df1['second'].astype(int)
print(df1)
print('-------------')
df2 = df[df['second'].isnull()]
df2.loc[:, 'second'] = [i+1 for i in range(0, df2.shape[0])]
print(df2)

# Build a list repeating the same string or number
a = 'qz_high'
b = [a]*10
print(b)

# Count only the classes where the prediction was True
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
class_correct[1] += True  # adding True adds 1 only when the prediction is correct
class_total[1] += 1
print(class_correct)
print(class_total)

# Program that accepts a whitespace-separated sequence of words, removes duplicates, sorts them alphabetically, and prints the result
s = "hello world and practice makes perfect and hello world again"
words = [word for word in s.split(" ")]
print(words)
print(" ".join(sorted(list(set(words)))))

# List the distinct values in a DataFrame column
import pandas as pd
a = [1, 5, 6, 8, 2, 1, 1]
b = [11, 15, 16, 18, 12, 14, 17]
c = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c})
print(list(set(df['first'])))  # [1, 2, 5, 6, 8]

# Turn the DataFrame column names into a string usable in a bulk database INSERT
import pandas as pd
a = [1, 5, 6, 8, 2, 1, 1]
b = [11, 15, 16, 18, 12, 14, 17]
c = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c})
print(df)
columns_str = '({})'.format(', '.join(df.columns))
print(columns_str)  # (first, second, third)

# Find the indices of the two numbers whose sum equals target
class Solution(object):
    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        length = len(nums)
        for i in range(0, length - 1):
            for j in range(i + 1, length):
                if nums[i] + nums[j] == target:
                    return [i, j]


nums = [2, 7, 11, 15]
target = 9
values = Solution().twoSum(nums, target)
print(values)

# Drop certain columns or rows from a DataFrame, or keep only certain columns
import pandas as pd

a = [1, 5, 6, 8, 2, 1, 1]
b = [11, 15, 16, 18, 12, 14, 17]
c = [111, 115, 116, 118, 112, 114, 117]
d = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c, 'forth': d})
print(df)
# Drop some columns
df.drop(columns=['first', 'second'], inplace=True)
print(df)
# Drop some rows
df.drop([0, 3], inplace=True)
print(df)
# Keep only certain columns (rebuild df first, since 'first' and 'second' were dropped above)
df = pd.DataFrame({'first': a, 'second': b, 'third': c, 'forth': d})
df = df.loc[:, ['first', 'second']]
print(df)

# difference_update also works on sets of tuples
a = set([(1, 5), (2, 6)])
b = set([(1, 5), (2, 6), (6, 8)])
print(a)
print(b)
b.difference_update(a)
print(b)

# Round to one decimal place
import pandas as pd
a = [1.0, 5, 6, 8, 2, 7, 9]
b = [11, 15.2, 16, 18, 12, 14, 17]
c = [111, 115, 116, 118, 112, 114, 117]
d = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c, 'forth': d})
print(df)
df = df.round({"first": 1, "second": 1})
print(df)

# Take the difference of two DataFrames (their id columns differ)
import pandas as pd
aa = [1, 5, 6]
bb = [11, 15, 16]
cc = [111, 115, 116]
dd = [111, 115, 116]
df1 = pd.DataFrame({'one': aa, 'two': bb, 'three': cc, 'four': dd})
print(df1)
a = [1, 5, 6, 8, 2, 7, 9]
b = [11, 15, 16, 18, 12, 14, 17]
c = [111, 115, 116, 118, 112, 114, 117]
d = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c, 'forth': d})
print(df)
diff = list(set(df['first']).difference(set(df1['one'])))
print(diff)
df_new = df.loc[df['first'].isin(diff), :]
print(df_new)
df_new.loc[:, 'forth'] = [i + 1 for i in range(0, df_new.shape[0])]
print(df_new)

# Slice rows 1-5 and the 'first'/'second' columns into a new DataFrame, then add a gid column (df itself is unchanged)
import pandas as pd
a = [1, 5, 6, 8, 2, 7, 9]
b = [11, 15, 16, 18, 12, 14, 17]
c = [111, 115, 116, 118, 112, 114, 117]
d = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c, 'forth': d})
print(df)
df_new = df.loc[1:5, ['first', 'second']]
df_new['gid'] = [i for i in range(0, df_new.shape[0])]
print(df_new)
print(df)

# Replace duplicated values in a column
import pandas as pd
a = [1, 5, 6, 6, 6, 7, 9]
b = [11, 15, 16, 18, 12, 14, 17]
c = [111, 115, 116, 118, 112, 114, 117]
d = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c, 'forth': d})
print(df)
df1 = df[~df['first'].duplicated()]
print(df1)
gid_sum = df['first'].duplicated().sum()
print(gid_sum)
df_gid_dup = df[df['first'].duplicated()].copy()  # copy() so the assignment below does not trigger SettingWithCopyWarning
df_gid_dup['first'] = [i+1 for i in range(0, df_gid_dup.shape[0])]
print(df_gid_dup)
df = pd.concat([df1, df_gid_dup])
print(df)

# Replace several values in one row at once
import pandas as pd
import numpy as np
nan = np.nan
a = [1, 5, 6, 8, 3, 7, 9]
b = [nan, nan, nan, 'PE', 'PE', nan, nan]
c = [nan, nan, '中压A', '中压A', nan, nan, nan]
d = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c, 'forth': d})
print(df)
df.loc[df['first'] == 5, ('second', 'third', 'forth')] = df.loc[df['first'] == 6, ('second', 'third', 'forth')].values
print(df)

# Depth-first traversal example
# import matplotlib.pyplot as plt
import networkx as nx
graph = nx.Graph()
# graph.add_edges_from([(12, 25), (41, 42), (29, 30), (30, 31), (31, 32), (52, 53), (53, 54), (54, 55), (40, 41)])
graph.add_edge(2, 3, gis_id='b')
graph.add_edge(2, 4, gis_id='c')
graph.add_edge(4, 5, gis_id='d')
graph.add_edge(4, 6, gis_id='e')
graph.add_edge(6, 7, gis_id='f')
# nx.draw(graph, with_labels=True)
for source, target in nx.dfs_edges(graph, 2, depth_limit=1):
    print(source, target)

# Using map together with lambda
import pandas as pd
a = [1, 5, 6, 8, 3, 7, 9]
b = ['PE', 'PE', 'PE', 'PE', 'PE', 'PE', 'PE']
c = ['中压A', '中压A', '中压A', '中压A', '中压A', '中压A', '中压A']
d = [111, 115, 116, 118, 112, 114, 117]
df = pd.DataFrame({'first': a, 'second': b, 'third': c, 'forth': d})
print(df)
fill_id = [1, 2, 3, 4, 5, 6, 7]
outer_d_value = df.loc[:, 'first'].values.tolist()
length_value = df.loc[:, 'second'].values.tolist()
material_value = df.loc[:, 'third'].values.tolist()
pressure_value = df.loc[:, 'forth'].values.tolist()
print(length_value)
# map + lambda: zip the five lists into row tuples
values_list = list(map(lambda a, b, c, d, e: (int(a), b, c, d, e), fill_id, outer_d_value,
                       length_value, material_value, pressure_value))
print(values_list)

# Replace whitespace-only values with NaN so they can be spotted or dropped
df['address'] = df['address'].apply(lambda x: np.nan if str(x).isspace() else x)
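# A minimal runnable sketch of the same idea; the 'address' column here is made up for illustration.
import numpy as np
import pandas as pd
df_addr = pd.DataFrame({'address': ['Beijing', ' ', 'Shanghai', '   ']})
df_addr['address'] = df_addr['address'].apply(lambda x: np.nan if str(x).isspace() else x)
print(df_addr)           # whitespace-only cells become NaN
print(df_addr.dropna())  # and can then be dropped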

# Use lambda to snap minute values onto 5-minute intervals
import pandas as pd

x = [5, 6, 8, 9, 54, 21, 5, 7, 2, 41, 66]
df = pd.DataFrame(x, columns=['a'])
print(df)
interval_time_list = [*range(0, 56, 5)]
print(interval_time_list)
b = df['a'].apply(lambda x: min(interval_time_list, key=lambda time: abs(time - x)))
print(b)

# Find the row whose timestamp is closest to a given target time
import pandas as pd

x = [5, 8, 6, 9]
ts = [
    "2023-1-12 11:59:21",
    "2023-1-12 11:59:30",
    "2023-1-12 11:59:01",
    "2023-1-12 12:00:05"
]
df = pd.DataFrame({'a': x, 'ts': ts})
df['ts'] = pd.to_datetime(df['ts'])
print(df)
df['diff'] = abs(df['ts'] - pd.Timestamp("2023-1-12 12:00:00"))
print(df)
df = df.sort_values(['diff'])
print(df.head(1))
print(df[df['diff'] == df['diff'].min()])


# How merge works
import pandas as pd

left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
                       'A': ['A0', 'A1', 'A2', 'A3'],
                       'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key': ['K2', 'K3', 'K1', 'K0'],
                        'C': ['C0', 'C1', 'C2', 'C3'],
                        'D': ['D0', 'D1', 'D2', 'D3']})
result = pd.merge(left, right, how='left', on='key')

# the column passed via the `on` parameter is used as the join key
print(left)
print(right)
print(result)

# Save a DataFrame to Excel
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error, \
    median_absolute_error, mean_absolute_percentage_error, r2_score
import pandas as pd
import os


final_score = {}
vv = {'aa': 11, 'bb': 22}
for user, j in vv.items():  # iterate over key/value pairs
    v1 = ['东南', '西南', '北部']
    user_score_dict = {}
    for k in v1:
        y_true = [2, 1, 5, 7, 9, 7, 2]
        y_pred = [5, 7, 8, 4, 6, 5, 3]

        score_dict = {}
        score_dict["mae_score"] = mean_absolute_error(y_true, y_pred)
        score_dict["mape_score"] = mean_absolute_percentage_error(y_true, y_pred)
        score_dict["mse_score"] = mean_squared_error(y_true, y_pred)
        score_dict["score_r2"] = r2_score(y_true, y_pred)

        user_score_dict[k] = score_dict
    print(user_score_dict)
    final_score[user] = user_score_dict
print(final_score)
print(pd.DataFrame(final_score))

# Save the important results
os.makedirs("../score", exist_ok=True)
writer = pd.ExcelWriter("../score/transfer_leaning_score.xlsx", engine='openpyxl')
# pd.DataFrame(all_mmd_dict).to_excel(writer, sheet_name="all_mmd_dict")
# pd.DataFrame(feature_mmd_dict).to_excel(writer, sheet_name="feature_mmd_dict")
pd.DataFrame(final_score).to_excel(writer, sheet_name="single_score_dict")
# pd.DataFrame(multi_score_dict).to_excel(writer, sheet_name="multi_score_dict")

writer.close()  # older pandas used writer.save(); newer versions removed it, and close() writes the file
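# Alternatively, a context manager closes the writer automatically (a sketch reusing final_score
# and the same output path as above):
with pd.ExcelWriter("../score/transfer_leaning_score.xlsx", engine='openpyxl') as writer:
    pd.DataFrame(final_score).to_excel(writer, sheet_name="single_score_dict")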

# After computing MMD scores, pick training sources with a simple strategy
# user_mmd_dict = {'中部南城_李永波羽毛球馆1':{"中部厚街_升辉1":3.2227,"中部厚街_升":3.22}, '中部南城_李永波羽毛球馆':{"中部厚街_升辉":2.1495, "中部厚街_":2.14}}
df_copy_dict = {"中部厚街_升辉1": 3.2, "中部厚街_升": 2.3, "中部厚街_": 1.3}
user_mmd_dict = {}
for user, user_data in df_copy_dict.items():
    mmd_dict = {}

    for k, v in df_copy_dict.items():
        mmd_dict[k] = v
    user_mmd_dict[user] = mmd_dict
a = list(user_mmd_dict['中部厚街_升辉1'].values())
print(a)
print(",".join(str(i) for i in a))
b = sorted(a, key=float)
print(a)
# print(type(b))
# print()
print(b[1:])
c = b[1:]


# Get the key(s) of a dict for a given value
def get_keys(d, value):
    return [k for k, v in d.items() if v == value]


for i in c:
    aa = get_keys(df_copy_dict, i)
    print(aa)

# Sort a dict by its values
a = {'中部南城_李永波羽毛球馆1': 0, '中部厚街_升辉1': 3.7571, '中部厚街_唯雅1': 3.2078, '中部厚街_承光五金(厨房)1': 4.4304}
# user_list = sorted(a.items(), key=lambda kv: (kv[1], kv[0]))
user_list = sorted(a.items(), key=lambda x: x[1])
print(user_list)

# Concatenate several DataFrames that share the same columns
import pandas as pd

df1 = pd.DataFrame({'ID': [1, 2, 3], 'Name': ['Tim', 'Victor', 'Nick'], 'age': [1, 2, 3]})  # sample data
df2 = pd.DataFrame({'age': [1, 6, 8], 'ID': [4, 5, 6], 'Name': ['yim', 'rose', 'soe']})  # sample data
df3 = pd.DataFrame({'Name': ['san', 'dai', 'sin'], 'age': [11, 21, 31], 'ID': [7, 8, 9]})  # sample data
df_dict = {}
df_dict['first'] = df1
df_dict['second'] = df2
df_dict['third'] = df3
print(df_dict)
user_list = ['first', 'second', 'third']
d = pd.concat([df_dict[i] for i in user_list], axis=0)
print(d)


# Using isin and index
import pandas as pd


a = [1, 2, 3, 4, 5, 6, 7, 8]
b = ['PE', 'PE', 'PE', 'PE', 'PE', 'PE', 'PE', 'PE']
c = ['高压', '高压', '高压', '高压', '高压', '高压', '高压', '高压']
d = [10, 21, 22, 22, 24, 20, 23, 25]
e = [10, 21, 22, 24, 23, 23, 25, 26]
f = [20, 30, 30, 20, 20, 20, 20, 20]
df = pd.DataFrame({'id': a, 'source': d, 'target': e, 'material': b, 'pressure': c, 'weight': f})
print(df)
df.loc[df['weight'] == 30, 'source'] = 6
print(df)
print(df.loc[df['source'] == 6, 'id'].values)
id_ = tuple(df.loc[df['source'] == df['target'], 'id'])
print(id_)
df.drop(df[df['id'].isin(id_)].index, inplace=True)
print(df)


# Keep two decimal places of a float without rounding, two ways
# (fragments: df and pipe_data are assumed to exist)
# 1. Split on the decimal point, pad the fractional part with zeros, and keep its first two digits
df['s'] = df['weight'].apply(lambda x: ".".join([str(x).partition('.')[0], (str(x).partition('.')[-1] + "0"*2)[:2]])).astype(float)
# 2. Scale up, truncate with int(), and scale back down
pipe_data['x_source'] = pipe_data['x_source'].apply(lambda x: int(x * 100) / 100)
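# A small runnable sketch of the second method on made-up data (it truncates, never rounds up):
import pandas as pd
df_trunc = pd.DataFrame({'weight': [1.239, 2.999, 3.001]})
df_trunc['w2'] = df_trunc['weight'].apply(lambda x: int(x * 100) / 100)
print(df_trunc)  # w2: 1.23, 2.99, 3.00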


# Extract two coordinates from a 'geom' string column into new columns with apply(axis=1)
# (fragment: df is assumed to have a 'geom' column containing exactly two numbers)
import re

def fun(x):
    re_linestring = re.compile(r'\d+\.*\d+')
    x['a'], x['b'] = list(map(float, re.findall(re_linestring, x['geom'])))
    return x

df = df.apply(fun, axis=1)
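# A self-contained sketch of the same pattern; the POINT-style geom strings below are made up.
import re
import pandas as pd

def extract_xy(row):
    nums = re.findall(r'\d+\.\d+', row['geom'])  # the two floats inside the geom string
    row['a'], row['b'] = float(nums[0]), float(nums[1])
    return row

df_geom = pd.DataFrame({'geom': ['POINT(13294064.7654 4312942.9668)', 'POINT(13794019.877 4312953.8644)']})
df_geom = df_geom.apply(extract_xy, axis=1)
print(df_geom)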

# Pull the minute (or hour/second) out of a timestamp string
df_t1['m'] = df_t1['ts'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S').minute)
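# A runnable sketch with a made-up df_t1; note that strptime needs `from datetime import datetime`.
from datetime import datetime
import pandas as pd
df_t1 = pd.DataFrame({'ts': ['2023-01-12 11:59:21', '2023-01-12 12:00:05']})
df_t1['m'] = df_t1['ts'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S').minute)
print(df_t1)  # m column: 59 and 0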

# Remove rows where the same value appears consecutively (the whole run is dropped)
import pandas as pd


a = [1, 2, 3, 4, 5, 6, 7, 8]
b = ['PE', 'PE', 'PE', 'PE', 'PE', 'PE', 'PE', 'PE']
c = ['高压', '高压', '高压', '高压', '高压', '高压', '高压', '高压']
d = [10, 21.23, 22, 22, 24, 20, 23, 25]
e = [10, 21, 22, 24, 23, 23, 25, 26]
f = [30, 30, 10, 20, 20, 6, 10, 20]
df = pd.DataFrame({'id': a, 'source': d, 'target': e, 'material': b, 'pressure': c, 'weight': f})
print(df)
df2 = df.groupby((df['weight'].shift() != df['weight']).cumsum()).filter(lambda x: len(x) <= 1)
print(df2)

# Check whether a numeric value is NaN (np.isnan only accepts numeric input)
np.isnan(i)
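# Quick sketch: np.isnan works on floats and arrays, while pd.isna also handles None and non-numeric values.
import numpy as np
import pandas as pd
print(np.isnan(np.nan), np.isnan(1.5))  # True False
print(pd.isna(None), pd.isna('text'))   # True False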

# Save value counts as a DataFrame
df_ = df['date'].value_counts().to_frame()

# Turn the index back into a regular column and rename it
df_ = df_.reset_index().rename(columns={'index': 'ts'})

# Rename a column
df_.rename(columns={'date': 'counts'}, inplace=True)
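# A self-contained sketch of the three steps; the column names produced by value_counts/reset_index
# differ between pandas versions, so setting df_.columns directly is the version-proof option.
import pandas as pd
df_dates = pd.DataFrame({'date': ['2023-01-01', '2023-01-01', '2023-01-02']})
df_ = df_dates['date'].value_counts().to_frame().reset_index()
df_.columns = ['ts', 'counts']
print(df_)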

# Take the first row of a DataFrame as a separate DataFrame
data_first = res_data.loc[[0], :]

# Convert a Timestamp column to strings
data['created_time'] = data['created_time'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
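# Runnable sketch on a made-up column; data['created_time'].dt.strftime(...) would do the same thing vectorised.
import pandas as pd
data = pd.DataFrame({'created_time': pd.to_datetime(['2023-01-12 11:59:21', '2023-01-12 12:00:05'])})
data['created_time'] = data['created_time'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
print(data.dtypes)  # created_time is now object (string)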

# Regex that captures column names with a numeric suffix, e.g. d1, lambda2
pattern = re.compile(r'([a-zA-Z_]+)(\d+)')
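# Sketch of applying the pattern to some made-up column names, splitting each into (base, suffix).
import re
pattern = re.compile(r'([a-zA-Z_]+)(\d+)')
for col in ['d1', 'lambda2', 'value', 'x_10']:
    m = pattern.fullmatch(col)
    if m:
        print(col, '->', m.group(1), m.group(2))  # d1 -> d 1, lambda2 -> lambda 2, x_10 -> x_ 10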