block_dump观察Linux IO写入的具体文件

本文介绍了一种在Linux环境下用于调试程序IO状况的工具——Block Dump。它可以在全局视角之外提供具体文件的IO信息,包括进程号、inode号、文件名及磁盘设备名等。文章详细解释了其使用方法及工作原理。

http://www.oenhan.com/block-dump-linux-io

很多情况下开发者调测程序需要在Linux下获取具体的IO的状况,目前常用的IO观察工具有vmstat和iostat,具体功能上说当然是iostat更胜一筹,在IO统计的时间点上更具体精细。但二者都是在全局上看IO,宏观上的数据对于判断IO写到哪个文件上毫无帮助,这个时候block_dump的作用就显现出来了。

一、使用方法:

需要先停掉syslog功能,因为具体IO数据要通过printk输出,如果syslog存在,则会往message产生大量IO,干扰正常结果

1
2
suse:~ # service syslog stop
Shutting down syslog services done

然后启动block_dump

1
suse:~ # echo 1 > /proc/sys/vm/block_dump

先说效果:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
suse:~ # dmesg | tail
dmesg(3414): dirtied inode 9594 (LC_MONETARY) on sda1
dmesg(3414): dirtied inode 9238 (LC_COLLATE) on sda1
dmesg(3414): dirtied inode 9241 (LC_TIME) on sda1
dmesg(3414): dirtied inode 9606 (LC_NUMERIC) on sda1
dmesg(3414): dirtied inode 9350 (LC_CTYPE) on sda1
kjournald(506): WRITE block 3683672 on sda1
kjournald(506): WRITE block 3683680 on sda1
kjournald(506): WRITE block 3683688 on sda1
kjournald(506): WRITE block 3683696 on sda1
kjournald(506): WRITE block 3683704 on sda1
kjournald(506): WRITE block 3683712 on sda1
kjournald(506): WRITE block 3683720 on sda1
kjournald(506): WRITE block 3683728 on sda1
kjournald(506): WRITE block 3683736 on sda1
kjournald(506): WRITE block 3683744 on sda1

通过dmesg信息可以看到IO正在写哪些文件,有进程号、inode号、文件名和磁盘设备名;但每个文件写了多少呢,仅仅通过dirtied inode就看不出来了,还需要分析WRITE block,后面的数字并不是真正的块号,而是内核IO层获取的扇区号,除以8即为块号,然后根据debugfs工具的icheck和ncheck选项,就可以获取该文件系统块属于哪个具体文件,具体请google之。

二、基本原理:

block_dump的原理其实很简单,内核在IO层根据标志block_dump,在IO提交给磁盘的关口卡住过关的每一个BIO,将它们的数据打印出来:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
void submit_bio( int rw, struct bio *bio)
{
      int count = bio_sectors(bio);
 
      bio->bi_rw |= rw;
 
/*
  * If it's a regular read/write or a barrier with data attached,
  * go through the normal accounting stuff before submission.
  */
      if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
          if (rw & WRITE) {
          count_vm_events(PGPGOUT, count);
      } else {
          task_io_account_read(bio->bi_size);
          count_vm_events(PGPGIN, count);
      }
 
      if (unlikely(block_dump)) {
          char b[BDEVNAME_SIZE];
          printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)n" ,
               current->comm, task_pid_nr(current),
               (rw & WRITE) ? "WRITE" : "READ" ,
               (unsigned long long )bio->bi_sector,
               bdevname(bio->bi_bdev, b),
               count);
         }
     }
 
     generic_make_request(bio);
}

具体WRITE block块号和文件系统块号之间的对应关系在submit_bh函数中决定

1
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);

inode的block_dump实现是通过block_dump___mark_inode_dirty搞定的,这次把关口架在inode脏数据写回的路上,把每个过关的inode信息打出来:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
/*
 * Excerpt (body elided by the article): __mark_inode_dirty() is where the
 * inode-side block_dump hook lives — every inode being dirtied passes
 * through here on its way to writeback.
 */
void __mark_inode_dirty( struct inode *inode, int flags)
{
 
/* When /proc/sys/vm/block_dump is enabled, log this dirtied inode. */
if (unlikely(block_dump))
block_dump___mark_inode_dirty(inode);
 
}
 
/*
 * block_dump___mark_inode_dirty - log one dirtied inode to the kernel log.
 * @inode: the inode being marked dirty.
 *
 * Produces the "dirtied inode <ino> (<name>) on <dev>" lines shown earlier.
 * Skips the anonymous bdev superblock (i_ino == 0 && s_id == "bdev").
 * Fixes vs. the article's copy: the printk format lost its "\n" backslash,
 * and the function's closing brace was missing.
 */
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		/* Borrow a dentry so we can print a human-readable name;
		 * d_lock pins d_name while we use it. */
		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *)dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

三、总结

1.内核有很多合适的关口可以截获IO信息,不改动内核,也可以用jprobe获取很多东西。

2.debugfs在大量的block–>file转换过程中太慢,自己用ext2fs写一个,效率应该能提高很多。


// SPDX-License-Identifier: GPL-2.0-only /* * MTD Oops/Panic logger * * Copyright © 2007 Nokia Corporation. All rights reserved. * * Author: Richard Purdie <rpurdie@openedhand.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/console.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/timekeeping.h> #include <linux/mtd/mtd.h> #include <linux/kmsg_dump.h> #include <linux/reboot.h> #include <linux/platform_device.h> #include <linux/io.h> /* Maximum MTD partition size */ #define MTDOOPS_MAX_MTD_SIZE (16 * 1024 * 1024) static unsigned long record_size = 4096; module_param(record_size, ulong, 0400); MODULE_PARM_DESC(record_size, "record size for MTD OOPS pages in bytes (default 4096)"); static char mtddev[80]; module_param_string(mtddev, mtddev, 80, 0400); MODULE_PARM_DESC(mtddev, "name or index number of the MTD device to use"); static int dump_oops = 1; module_param(dump_oops, int, 0600); MODULE_PARM_DESC(dump_oops, "set to 1 to dump oopses, 0 to only dump panics (default 1)"); static unsigned long lkmsg_record_size = 512 * 1024; extern struct raw_notifier_head pwrkey_irq_notifier_list; #define MAX_CMDLINE_PARAM_LEN 256 static char build_fingerprint[MAX_CMDLINE_PARAM_LEN] = {0}; module_param_string(fingerprint, build_fingerprint, MAX_CMDLINE_PARAM_LEN,0644); static int boot_mode = 0; module_param(boot_mode, int, 0600); MODULE_PARM_DESC(boot_mode, "boot_mode (default 0)"); #define MTDOOPS_KERNMSG_MAGIC_v1 0x5d005d00 /* Original */ #define MTDOOPS_KERNMSG_MAGIC_v2 0x5d005e00 /* Adds the timestamp */ #define MTDOOPS_HEADER_SIZE 8 enum mtd_dump_reason { MTD_DUMP_UNDEF, MTD_DUMP_PANIC, MTD_DUMP_OOPS, MTD_DUMP_EMERG, MTD_DUMP_SHUTDOWN, MTD_DUMP_RESTART, MTD_DUMP_POWEROFF, MTD_DUMP_LONG_PRESS, MTD_DUMP_MAX }; static char *kdump_reason[8] = { "Unknown", "Kernel Panic", "Oops!", 
"Emerg", "Shut Down", "Restart", "PowerOff", "Long Press" }; enum mtdoops_log_type { MTDOOPS_TYPE_UNDEF, MTDOOPS_TYPE_DMESG, MTDOOPS_TYPE_PMSG, }; static char *log_type[4] = { "Unknown", "LAST KMSG", "LAST LOGCAT" }; struct pmsg_buffer_hdr { uint32_t sig; atomic_t start; atomic_t size; uint8_t data[0]; }; struct pmsg_platform_data { unsigned long mem_size; phys_addr_t mem_address; unsigned long console_size; unsigned long pmsg_size; }; struct mtdoops_hdr { u32 seq; u32 magic; ktime_t timestamp; } __packed; static struct mtdoops_context { struct kmsg_dumper dump; struct notifier_block reboot_nb; struct notifier_block pwrkey_long_press_nb; struct pmsg_platform_data pmsg_data; int mtd_index; struct work_struct work_erase; struct work_struct work_write; struct mtd_info *mtd; int oops_pages; int nextpage; int nextcount; unsigned long *oops_page_used; unsigned long oops_buf_busy; void *oops_buf; } oops_cxt; static void mark_page_used(struct mtdoops_context *cxt, int page) { set_bit(page, cxt->oops_page_used); } static void mark_page_unused(struct mtdoops_context *cxt, int page) { clear_bit(page, cxt->oops_page_used); } static int page_is_used(struct mtdoops_context *cxt, int page) { return test_bit(page, cxt->oops_page_used); } static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) { struct mtd_info *mtd = cxt->mtd; u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize; u32 start_page = start_page_offset / record_size; u32 erase_pages = mtd->erasesize / record_size; struct erase_info erase; int ret; int page; erase.addr = offset; erase.len = mtd->erasesize; ret = mtd_erase(mtd, &erase); if (ret) { pr_warn("erase of region [0x%llx, 0x%llx] on \"%s\" failed\n", (unsigned long long)erase.addr, (unsigned long long)erase.len, mtddev); return ret; } /* Mark pages as unused */ for (page = start_page; page < start_page + erase_pages; page++) mark_page_unused(cxt, page); return 0; } static void mtdoops_erase(struct mtdoops_context *cxt) { struct 
mtd_info *mtd = cxt->mtd; int i = 0, j, ret, mod; /* We were unregistered */ if (!mtd) return; mod = (cxt->nextpage * record_size) % mtd->erasesize; if (mod != 0) { cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; } while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) { badblock: pr_warn("bad block at %08lx\n", cxt->nextpage * record_size); i++; cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; if (i == cxt->oops_pages / (mtd->erasesize / record_size)) { pr_err("all blocks bad!\n"); return; } } if (ret < 0) { pr_err("mtd_block_isbad failed, aborting\n"); return; } for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); if (ret >= 0) { pr_debug("ready %d, %d\n", cxt->nextpage, cxt->nextcount); return; } if (ret == -EIO) { ret = mtd_block_markbad(mtd, cxt->nextpage * record_size); if (ret < 0 && ret != -EOPNOTSUPP) { pr_err("block_markbad failed, aborting\n"); return; } } goto badblock; } /* Scheduled work - when we can't proceed without erasing a block */ static void mtdoops_workfunc_erase(struct work_struct *work) { struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_erase); mtdoops_erase(cxt); } static void mtdoops_inc_counter(struct mtdoops_context *cxt, int panic) { cxt->nextpage++; if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; cxt->nextcount++; if (cxt->nextcount == 0xffffffff) cxt->nextcount = 0; if (page_is_used(cxt, cxt->nextpage)) { pr_debug("not ready %d, %d (erase %s)\n", cxt->nextpage, cxt->nextcount, panic ? 
"immediately" : "scheduled"); if (panic) { /* In case of panic, erase immediately */ mtdoops_erase(cxt); } else { /* Otherwise, schedule work to erase it "nicely" */ schedule_work(&cxt->work_erase); } } else { pr_debug("ready %d, %d (no erase)\n", cxt->nextpage, cxt->nextcount); } } static void mtdoops_write(struct mtdoops_context *cxt, int panic) { struct mtd_info *mtd = cxt->mtd; size_t retlen; struct mtdoops_hdr *hdr; int ret; if (test_and_set_bit(0, &cxt->oops_buf_busy)) return; /* Add mtdoops header to the buffer */ hdr = (struct mtdoops_hdr *)cxt->oops_buf; hdr->seq = cxt->nextcount; hdr->magic = MTDOOPS_KERNMSG_MAGIC_v2; hdr->timestamp = ktime_get_real(); if (panic) { ret = mtd_panic_write(mtd, cxt->nextpage * record_size, record_size, &retlen, cxt->oops_buf); if (ret == -EOPNOTSUPP) { pr_err("Cannot write from panic without panic_write\n"); goto out; } } else ret = mtd_write(mtd, cxt->nextpage * record_size, record_size, &retlen, cxt->oops_buf); if (retlen != record_size || ret < 0) pr_err("write failure at %ld (%td of %ld written), error %d\n", cxt->nextpage * record_size, retlen, record_size, ret); mark_page_used(cxt, cxt->nextpage); // memset(cxt->oops_buf, 0xff, record_size); // mtdoops_inc_counter(cxt, panic); out: clear_bit(0, &cxt->oops_buf_busy); } static void mtdoops_workfunc_write(struct work_struct *work) { struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_write); mtdoops_write(cxt, 0); } static void find_next_position(struct mtdoops_context *cxt) { struct mtd_info *mtd = cxt->mtd; struct mtdoops_hdr hdr; int ret, page, maxpos = 0; u32 maxcount = 0xffffffff; size_t retlen; for (page = 0; page < cxt->oops_pages; page++) { if (mtd_block_isbad(mtd, page * record_size)) continue; /* Assume the page is used */ mark_page_used(cxt, page); ret = mtd_read(mtd, page * record_size, sizeof(hdr), &retlen, (u_char *)&hdr); if (retlen != sizeof(hdr) || (ret < 0 && !mtd_is_bitflip(ret))) { pr_err("read failure at %ld (%zu of %zu 
read), err %d\n", page * record_size, retlen, sizeof(hdr), ret); continue; } if (hdr.seq == 0xffffffff && hdr.magic == 0xffffffff) mark_page_unused(cxt, page); if (hdr.seq == 0xffffffff || (hdr.magic != MTDOOPS_KERNMSG_MAGIC_v1 && hdr.magic != MTDOOPS_KERNMSG_MAGIC_v2)) continue; if (maxcount == 0xffffffff) { maxcount = hdr.seq; maxpos = page; } else if (hdr.seq < 0x40000000 && maxcount > 0xc0000000) { maxcount = hdr.seq; maxpos = page; } else if (hdr.seq > maxcount && hdr.seq < 0xc0000000) { maxcount = hdr.seq; maxpos = page; } else if (hdr.seq > maxcount && hdr.seq > 0xc0000000 && maxcount > 0x80000000) { maxcount = hdr.seq; maxpos = page; } } if (maxcount == 0xffffffff) { cxt->nextpage = cxt->oops_pages - 1; cxt->nextcount = 0; } else { cxt->nextpage = maxpos; cxt->nextcount = maxcount; } mtdoops_inc_counter(cxt, 0); } static void mtdoops_add_reason(char *oops_buf, int reason, enum mtdoops_log_type type, int index, int nextpage) { char str_buf[512] = {0}; int ret_len = 0; struct timespec64 now; struct tm ts; char temp_buf[32] = {0}; int temp_len = 0; char BootMode[20] = {0}; unsigned long local_time; ktime_get_coarse_real_ts64(&now); /*set title time to UTC+8*/ local_time = (unsigned long)(now.tv_sec + 8 * 60 * 60); time64_to_tm(local_time, 0, &ts); if (boot_mode == 0) { strcpy(BootMode, "normal"); } else if (boot_mode == 1) { strcpy(BootMode, "recovery"); } else if (boot_mode == 2) { strcpy(BootMode, "poweroff_charger"); } temp_len = snprintf(temp_buf, 32,"\n ---mtdoops report start--- \n"); memcpy(oops_buf, temp_buf, temp_len); ret_len = snprintf(str_buf, 200, "\n```\n## Oops_Index: %d\n### Build: %s\n## REASON: %s\n#### LOG TYPE:%s\n## BOOT MODE:%s\n##### %04ld-%02d-%02d %02d:%02d:%02d\n```c\n", index, build_fingerprint, kdump_reason[reason], log_type[type], BootMode, ts.tm_year+1900, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec); if(ret_len >= sizeof(str_buf)) ret_len = sizeof(str_buf); memcpy(oops_buf + temp_len, str_buf, ret_len); } static 
void mtdoops_add_pmsg_head(char *oops_buf, enum mtdoops_log_type type) { char str_buf[80] = {0}; int ret_len = 0; struct timespec64 now; struct tm ts; unsigned long local_time; ktime_get_coarse_real_ts64(&now); local_time = (unsigned long)(now.tv_sec + 8 * 60 * 60); time64_to_tm(local_time, 0, &ts); ret_len = snprintf(str_buf, 80, "\n```\n#### LOG TYPE:%s\n#####%04ld-%02d-%02d %02d:%02d:%02d\n```\n", log_type[type], ts.tm_year + 1900, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec); memcpy(oops_buf, str_buf, ret_len); } static void mtdoops_do_dump(struct kmsg_dumper *dumper, enum mtd_dump_reason reason) { struct mtdoops_context *cxt = container_of(dumper, struct mtdoops_context, dump); struct kmsg_dump_iter iter; size_t ret_len = 0; void *pmsg_buffer_start = NULL; struct pmsg_buffer_hdr *p_hdr = NULL; int j = 0; int ret = 0; static int do_dump_count = 0; if(cxt->mtd == NULL) return; if(reason == KMSG_DUMP_SHUTDOWN || reason == KMSG_DUMP_EMERG) return; /* Only dump oopses if dump_oops is set */ if (reason == KMSG_DUMP_OOPS && !dump_oops) return; do_dump_count++; pr_err("%s start , count = %d , page = %d, reason = %d, dump_count = %d\n", __func__, cxt->nextcount, cxt->nextpage, reason, do_dump_count); if(do_dump_count>1) { for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); } kmsg_dump_rewind(&iter); if (test_and_set_bit(0, &cxt->oops_buf_busy)) return; kmsg_dump_get_buffer(&iter, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, lkmsg_record_size - MTDOOPS_HEADER_SIZE, &ret_len); clear_bit(0, &cxt->oops_buf_busy); mtdoops_add_reason(cxt->oops_buf + MTDOOPS_HEADER_SIZE, reason, MTDOOPS_TYPE_DMESG, cxt->nextcount, cxt->nextpage); pmsg_buffer_start = ioremap( ((cxt->pmsg_data.mem_address + cxt->pmsg_data.mem_size)- cxt->pmsg_data.pmsg_size), cxt->pmsg_data.mem_size); if (!device_base) { printk(KERN_ERR "ioremap failed!\n"); } p_hdr = (struct pmsg_buffer_hdr *)pmsg_buffer_start; pr_err("mtdoops_do_dump pmsg 
paddr = 0x%p \n", pmsg_buffer_start); if(p_hdr->sig == 0x43474244) { void *oopsbuf = cxt->oops_buf + (MTDOOPS_HEADER_SIZE + ret_len); uint8_t *p_buff_end = (uint8_t *)p_hdr->data + atomic_read(&p_hdr->size); int pmsg_cp_size = 0; int pstart = p_hdr->start.counter; int psize = p_hdr->size.counter; pmsg_cp_size = (record_size - (ret_len + MTDOOPS_HEADER_SIZE)); if (psize <= pmsg_cp_size) pmsg_cp_size = psize; if (pstart >= pmsg_cp_size) { memcpy(oopsbuf, p_hdr->data, pmsg_cp_size); } else { memcpy(oopsbuf, p_buff_end - (pmsg_cp_size - pstart), pmsg_cp_size - pstart); memcpy(oopsbuf + (pmsg_cp_size - pstart), p_hdr->data, pstart); } mtdoops_add_pmsg_head(cxt->oops_buf + (MTDOOPS_HEADER_SIZE + ret_len), MTDOOPS_TYPE_PMSG); } else pr_err("mtdoops: read pmsg failed sig = 0x%x \n", p_hdr->sig); if (reason == KMSG_DUMP_OOPS || reason == KMSG_DUMP_PANIC) { /* Panics must be written immediately */ mtdoops_write(cxt, 1); } else { /*we should write log immediately , if use work to write, *ufs will shutdown before write log finish */ mtdoops_write(cxt, 0); } pr_err("mtdoops_do_dump() finish \n"); } static int mtdoops_reboot_nb_handle(struct notifier_block *this, unsigned long event, void *ptr) { enum mtd_dump_reason reason; struct mtdoops_context *cxt = &oops_cxt; if (event == SYS_RESTART) reason = MTD_DUMP_RESTART; else if(event == SYS_POWER_OFF) reason = MTD_DUMP_POWEROFF; else return NOTIFY_OK; mtdoops_do_dump(&cxt->dump, reason); return NOTIFY_OK; } static int pwrkey_long_press_irq_event(struct notifier_block *this, unsigned long event, void *ptr) { struct mtdoops_context *cxt = &oops_cxt; mtdoops_do_dump(&cxt->dump, MTD_DUMP_LONG_PRESS); return NOTIFY_DONE; } static void mtdoops_do_null(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) { return; } static void mtdoops_notify_add(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; u64 mtdoops_pages = div_u64(mtd->size, record_size); int err; if (!strcmp(mtd->name, mtddev)) cxt->mtd_index = mtd->index; if 
(mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; if (mtd->size < mtd->erasesize * 2) { pr_err("MTD partition %d not big enough for mtdoops\n", mtd->index); return; } if (mtd->erasesize < record_size) { pr_err("eraseblock size of MTD partition %d too small\n", mtd->index); return; } if (mtd->size > MTDOOPS_MAX_MTD_SIZE) { pr_err("mtd%d is too large (limit is %d MiB)\n", mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024); return; } /* oops_page_used is a bit field */ cxt->oops_page_used = vmalloc(array_size(sizeof(unsigned long), DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG))); if (!cxt->oops_page_used) { pr_err("could not allocate page array\n"); return; } cxt->dump.max_reason = KMSG_DUMP_MAX; cxt->dump.dump = mtdoops_do_null; err = kmsg_dump_register(&cxt->dump); if (err) { pr_err("registering kmsg dumper failed, error %d\n", err); vfree(cxt->oops_page_used); cxt->oops_page_used = NULL; return; } /*for restart and power off*/ cxt->reboot_nb.notifier_call = mtdoops_reboot_nb_handle; cxt->reboot_nb.priority = 255; register_reboot_notifier(&cxt->reboot_nb); cxt->pwrkey_long_press_nb.notifier_call = pwrkey_long_press_irq_event; cxt->pwrkey_long_press_nb.priority = 255; raw_notifier_chain_register(&pwrkey_irq_notifier_list, &cxt->pwrkey_long_press_nb); cxt->mtd = mtd; cxt->oops_pages = (int)mtd->size / record_size; find_next_position(cxt); pr_info("Attached to MTD device %d\n", mtd->index); } static void mtdoops_notify_remove(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; if (kmsg_dump_unregister(&cxt->dump) < 0) pr_warn("could not unregister kmsg_dumper\n"); unregister_reboot_notifier(&cxt->reboot_nb); cxt->mtd = NULL; flush_work(&cxt->work_erase); flush_work(&cxt->work_write); } static struct mtd_notifier mtdoops_notifier = { .add = mtdoops_notify_add, .remove = mtdoops_notify_remove, }; static int mtdoops_parse_dt_u32(struct platform_device *pdev, const char *propname, u32 
default_value, u32 *value) { u32 val32 = 0; int ret; ret = of_property_read_u32(pdev->dev.of_node, propname, &val32); if (ret == -EINVAL) { /* field is missing, use default value. */ val32 = default_value; } else if (ret < 0) { pr_err("failed to parse property %s: %d\n", propname, ret); return ret; } /* Sanity check our results. */ if (val32 > INT_MAX) { pr_err("%s %u > INT_MAX\n", propname, val32); return -EOVERFLOW; } *value = val32; return 0; } static int mtdoops_pmsg_probe(struct platform_device *pdev) { struct mtdoops_context *cxt = &oops_cxt; struct resource *res; u32 value; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { pr_err("failed to locate DT /reserved-memory resource\n"); return -EINVAL; } cxt->pmsg_data.mem_size = resource_size(res); cxt->pmsg_data.mem_address = res->start; #define parse_u32(name, field, default_value) { \ ret = mtdoops_parse_dt_u32(pdev, name, default_value, \ &value); \ if (ret < 0) \ return ret; \ field = value; \ } parse_u32("console-size", cxt->pmsg_data.console_size, 0); parse_u32("pmsg-size", cxt->pmsg_data.pmsg_size, 0); #undef parse_u32 pr_err( "pares mtd_dt, mem_address =0x%llx, mem_size =0x%lx \n", cxt->pmsg_data.mem_address, cxt->pmsg_data.mem_size); pr_err( "pares mtd_dt, pmsg_size =0x%lx, console-size =0x%lx \n", cxt->pmsg_data.pmsg_size, cxt->pmsg_data.console_size); return 0; } static const struct of_device_id dt_match[] = { { .compatible = "xiaomi,mtdoops_pmsg" }, {} }; static struct platform_driver mtdoops_pmsg_driver = { .probe = mtdoops_pmsg_probe, .driver = { .name = "mtdoops_pmsg", .of_match_table = dt_match, }, }; static int __init mtdoops_init(void) { struct mtdoops_context *cxt = &oops_cxt; int mtd_index; char *endp; if (strlen(mtddev) == 0) { pr_err("mtd device (mtddev=name/number) must be supplied\n"); return -EINVAL; } if ((record_size & 4095) != 0) { pr_err("record_size must be a multiple of 4096\n"); return -EINVAL; } if (record_size < 4096) { pr_err("record_size must be over 
4096 bytes\n"); return -EINVAL; } /* Setup the MTD device to use */ cxt->mtd_index = -1; mtd_index = simple_strtoul(mtddev, &endp, 0); if (*endp == '\0') cxt->mtd_index = mtd_index; cxt->oops_buf = kmalloc(record_size, GFP_KERNEL); if (!cxt->oops_buf) return -ENOMEM; memset(cxt->oops_buf, 0xff, record_size); cxt->oops_buf_busy = 0; INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); platform_driver_register(&mtdoops_pmsg_driver); register_mtd_user(&mtdoops_notifier); return 0; } static void __exit mtdoops_exit(void) { struct mtdoops_context *cxt = &oops_cxt; unregister_mtd_user(&mtdoops_notifier); kfree(cxt->oops_buf); vfree(cxt->oops_page_used); } module_init(mtdoops_init); module_exit(mtdoops_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>"); MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver"); 问题堆栈对应mtdoops.c文件上传,怎么修复这个问题
最新发布
10-30
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值