fetchone()、fetchall()、rowcount 与 cur.execute() 的用法

本文介绍了使用Python进行SQL查询的基本方法,包括fetchone(), fetchall()和rowcount属性的使用,以及通过execute()方法执行SQL语句来插入数据的具体实践。
  • fetchone(): 该方法获取查询结果集的下一行,返回一个单行序列;没有更多数据时返回 None
  • fetchall(): 接收全部剩余的返回结果行,返回一个列表。
  • rowcount: 这是一个只读属性,并返回执行execute()方法后影响的行数。-1表示还没开始查询或没有查询到数据

### 插入两行数据

# Insert two demo rows into account(accid, money); assumes `cur` is an
# already-open DB-API cursor from a connected database.
cur.execute('insert into account(accid,money) VALUES (1,110)')

cur.execute('insert into account(accid,money) VALUES (2,10)')

 

参考:https://blog.youkuaiyun.com/weixin_34417635/article/details/85851122

import io
import logging
from datetime import datetime

import psycopg2
from psycopg2 import sql

logging.basicConfig(level=logging.INFO)


def table_migration(source_conn, target_conn):
    """Migrate every public-schema table from source to target via COPY.

    Fix: psycopg2 cursors do not expose read()/write() for COPY streams;
    use copy_expert() with an in-memory buffer instead.
    """
    with source_conn.cursor() as src_cur, target_conn.cursor() as tgt_cur:
        # Enumerate all tables in the public schema
        src_cur.execute("""
            SELECT table_name
            FROM information_schema.tables
            WHERE table_schema = 'public'
        """)
        tables = [row[0] for row in src_cur.fetchall()]

        for table in tables:
            logging.info("迁移表: %s", table)
            buf = io.StringIO()
            # sql.Identifier quotes the table name safely (no injection)
            src_cur.copy_expert(
                sql.SQL("COPY {} TO STDOUT").format(sql.Identifier(table)), buf
            )
            buf.seek(0)
            tgt_cur.copy_expert(
                sql.SQL("COPY {} FROM STDIN").format(sql.Identifier(table)), buf
            )
        target_conn.commit()


def incremental_migration(source_conn_str, target_conn_str, table_name):
    """Incrementally migrate rows of `table_name` newer than the stored watermark.

    Reads rows with updated_at greater than the last recorded migration
    timestamp, upserts them into the target (conflict key: id), then
    advances the watermark in migration_state.
    """
    source_conn = psycopg2.connect(source_conn_str)
    target_conn = psycopg2.connect(target_conn_str)
    try:
        create_state_table(target_conn)
        last_ts = get_last_migration_time(target_conn, table_name)

        with source_conn.cursor() as src_cur:
            # Fixes: identifier-quote the table name and parameterize the
            # timestamp (original interpolated both into an f-string);
            # fetchall was misspelled "fatchall".
            query = sql.SQL(
                "SELECT * FROM {} WHERE updated_at > %s ORDER BY updated_at"
            ).format(sql.Identifier(table_name))
            src_cur.execute(query, (last_ts,))
            rows = src_cur.fetchall()
            col_names = [desc[0] for desc in src_cur.description]

        if rows:
            with target_conn.cursor() as tgt_cur:
                columns = sql.SQL(", ").join(map(sql.Identifier, col_names))
                placeholders = sql.SQL(", ").join(
                    sql.Placeholder() for _ in col_names
                )
                # Fix: PostgreSQL's pseudo-table is EXCLUDED, not EXCLUDE
                update_set = sql.SQL(", ").join(
                    sql.SQL("{0} = EXCLUDED.{0}").format(sql.Identifier(col))
                    for col in col_names
                    if col != "id"
                )
                upsert_sql = sql.SQL(
                    "INSERT INTO {} ({}) VALUES ({}) "
                    "ON CONFLICT (id) DO UPDATE SET {}"
                ).format(
                    sql.Identifier(table_name), columns, placeholders, update_set
                )
                tgt_cur.executemany(upsert_sql, rows)

            # Advance the watermark to the newest updated_at we migrated
            new_last_ts = max(row[col_names.index("updated_at")] for row in rows)
            update_migration_state(target_conn, table_name, new_last_ts)
            print(f"迁移成功:{len(rows)} 条记录,最后时间戳:{new_last_ts}")
        else:
            print("无新数据需要迁移")
        target_conn.commit()
    except Exception as e:
        # Best-effort rollback and report; connections are closed in finally
        target_conn.rollback()
        print(f"迁移失败:{str(e)}")
    finally:
        source_conn.close()
        target_conn.close()


def create_state_table(conn):
    """Create the migration_state watermark table if it does not exist.

    Fixes: column was misspelled "tabel_name" (queries elsewhere use
    table_name) and the constraint was misspelled "NPT NULL".
    """
    with conn.cursor() as cur:
        cur.execute("""
            CREATE TABLE IF NOT EXISTS migration_state(
                table_name   TEXT PRIMARY KEY,
                last_updated TIMESTAMPTZ NOT NULL DEFAULT '1970-01-01'
            )
        """)
    conn.commit()


def get_last_migration_time(conn, table_name):
    """Return the stored watermark for `table_name`, or 1970-01-01 if absent.

    Fix: cursor method was misspelled "excute".
    """
    with conn.cursor() as cur:
        cur.execute(
            "SELECT last_updated FROM migration_state WHERE table_name = %s",
            (table_name,),
        )
        result = cur.fetchone()
    return result[0] if result else datetime(1970, 1, 1)


def update_migration_state(conn, table_name, last_ts):
    """Upsert the migration watermark for `table_name`."""
    with conn.cursor() as cur:
        cur.execute(
            """
            INSERT INTO migration_state (table_name, last_updated)
            VALUES (%s, %s)
            ON CONFLICT (table_name)
            DO UPDATE SET last_updated = EXCLUDED.last_updated
            """,
            (table_name, last_ts),
        )
    conn.commit()


# 使用示例
if __name__ == "__main__":
    source_db = "host=src_host dbname=src_db user=user password=pass"
    # Fix: original DSN was truncated after "user"
    target_db = "host=tar_host dbname=tar_db user=user password=pass"
    table_name = "project_branches"  # fix: was misspelled "tabel_name"
    # Fix: original call omitted the table argument entirely
    incremental_migration(source_db, target_db, table_name)
06-16
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值