Sharing sp_get_table_max: find the tables with the most rows across an instance

This post presents a SQL Server stored procedure that reports the largest tables, by row count, across the user databases of an instance. By combining dynamic SQL with a temporary table, the procedure can scan any set of databases and quickly locate the tables holding the most data.

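The core of the approach is a per-database query against sys.dm_db_partition_stats. Before the procedure wraps it in dynamic SQL, here is a minimal single-database sketch of that query; since the DMV reports one row per partition, row counts are summed per table:

-- Minimal sketch of the core query: run inside any user database.
SELECT TOP 10
    SCHEMA_NAME(b.schema_id) AS schema_name ,
    b.name AS table_name ,
    SUM(c.row_count) AS rows_count
FROM sys.objects b
    JOIN sys.dm_db_partition_stats c ON b.[object_id] = c.[object_id]
WHERE b.type = 'U'
    AND b.is_ms_shipped = 0
    AND c.index_id < 2      -- heap (0) or clustered index (1): the table itself
GROUP BY b.schema_id , b.name
ORDER BY SUM(c.row_count) DESC

The full procedure generalizes this to every matching database: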

USE master 
go 
IF object_id('sp_get_table_max','P') IS NOT NULL 
DROP PROCEDURE sp_get_table_max 
  
go 
-- @db : database name prefix filter ('' = all user databases)
-- @tb : table name prefix filter ('' = all tables)
-- @top: number of tables to return, largest first
CREATE PROCEDURE sp_get_table_max
    ( @db NVARCHAR(128) = '' , @tb NVARCHAR(128) = '' , @top INT = 100 )
AS

SET NOCOUNT ON ; 
 
-- Results are collected here; DB_NAME defaults to whichever database the
-- dynamic batch is running in at INSERT time.
CREATE TABLE #TableSpace
    ( DB_NAME VARCHAR(128) DEFAULT ( db_name() ) ,
    TableName VARCHAR(128) ,
    SCHEMA_NAME VARCHAR(128) ,
    RowsCount BIGINT )

-- Build one batch per matching database: switch context with USE, then
-- record each user table's row count from sys.dm_db_partition_stats.
-- Counts are summed per table because the DMV holds one row per partition.
DECLARE @sql NVARCHAR(max)
SET @sql = ''
SELECT @sql = @sql + REPLACE (
'
USE [@dbname]
INSERT INTO #TableSpace ( TableName , SCHEMA_NAME , RowsCount )
SELECT b.name ,
    SCHEMA_NAME(b.schema_id) ,
    SUM(c.row_count)
FROM sys.objects b
    JOIN sys.dm_db_partition_stats c ON b.[object_id] = c.[object_id]
WHERE b.type = ''U'' AND b.name LIKE ''' + @tb + '%''
    AND c.index_id < 2      -- heap (0) or clustered index (1): the table itself
    AND b.is_ms_shipped = 0 -- skip objects shipped by Microsoft
GROUP BY b.name , b.schema_id
' , '@dbname' , name)
FROM sys.databases
WHERE name LIKE @db + '%' AND database_id >= 5 -- ids 1-4 are master/tempdb/model/msdb
PRINT @sql -- debug output; note PRINT truncates nvarchar at 4000 characters
EXEC (@sql)

-- Return the @top largest tables, each with a ready-to-run sample query
SELECT TOP (@top)
    'SELECT TOP 10 * FROM [' + DB_NAME + '].[' + SCHEMA_NAME + '].[' + TableName + ']' AS [SQL] ,
    *
FROM #TableSpace
ORDER BY RowsCount DESC
DROP TABLE #TableSpace


go 
-- Mark as a system object so the procedure executes in the context of the
-- calling database rather than master
EXEC sp_MS_marksystemobject 'sp_get_table_max'
go 
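Usage is straightforward; because the procedure lives in master with an sp_ prefix and is marked as a system object, it can be called from any database. The name prefixes below are hypothetical, so substitute your own:

-- Hypothetical examples: replace the prefixes with names from your instance.
EXEC sp_get_table_max                                      -- top 100 tables across all user databases
EXEC sp_get_table_max @db = 'Sales'                        -- only databases whose names start with 'Sales'
EXEC sp_get_table_max @db = '' , @tb = 'Order' , @top = 10 -- top 10 tables starting with 'Order'

Each result row includes a generated "SELECT TOP 10 * FROM [db].[schema].[table]" statement, so you can copy it straight into a query window to inspect the largest tables.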