The best explanation of int( (LONG)(LONG_PTR)&(((type *)0)->field))

This article explains how a NULL pointer, ((type *)0), can be used to compute the offset of a struct member in C. The technique avoids instantiating an object and performs the calculation at compile time, so it imposes no runtime cost.
Found on CSDN.

The ANSI C standard allows a constant with the value 0 to be cast to a pointer of any type, and the result of the conversion is a NULL pointer, so ((type *)0) yields a NULL pointer of type type *. Using this NULL pointer to access a member of type would of course be illegal, but the intent of &( ((type *)0)->field ) is merely to compute the address of the field member. A smart compiler emits no code that actually accesses a type object; it simply computes this (constant) address at compile time from the memory layout of type and the assumed base address of the instance, so no memory is ever accessed through the NULL pointer. And because that base address is 0, the value of the computed address is exactly the offset of the field from the start of the struct.

This approach avoids instantiating a type object, and since the evaluation happens entirely at compile time, it carries no runtime overhead.
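For concreteness, here is a minimal, self-contained sketch of the idiom under the assumptions above. The macro name MY_OFFSETOF and the struct demo are made up for illustration; the standard library already provides an equivalent offsetof in <stddef.h>, and the extra (LONG)(LONG_PTR) casts in the title expression merely convert the pointer-valued result back to an integer type.

#include <stdio.h>
#include <stddef.h>   /* size_t; also provides the standard offsetof macro */

/* Hand-rolled version of the idiom discussed above (illustration only). */
#define MY_OFFSETOF(type, field) ((size_t)&(((type *)0)->field))

struct demo {
    char   a;   /* offset 0 */
    int    b;   /* typically offset 4, after padding */
    double c;   /* typically offset 8 */
};

int main(void)
{
    /* No struct demo object is ever created; both values are
     * constants computed by the compiler from the struct layout. */
    printf("offset of b = %zu\n", MY_OFFSETOF(struct demo, b));
    printf("offset of c = %zu\n", MY_OFFSETOF(struct demo, c));
    return 0;
}

On a typical LP64 platform this prints 4 and 8, though the exact padding is implementation-defined.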

// SPDX-License-Identifier: GPL-2.0-only /* * MTD Oops/Panic logger * * Copyright © 2007 Nokia Corporation. All rights reserved. * * Author: Richard Purdie <rpurdie@openedhand.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/console.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/timekeeping.h> #include <linux/mtd/mtd.h> #include <linux/kmsg_dump.h> #include <linux/reboot.h> #include <linux/platform_device.h> #include <linux/io.h> /* Maximum MTD partition size */ #define MTDOOPS_MAX_MTD_SIZE (16 * 1024 * 1024) static unsigned long record_size = 4096; module_param(record_size, ulong, 0400); MODULE_PARM_DESC(record_size, "record size for MTD OOPS pages in bytes (default 4096)"); static char mtddev[80]; module_param_string(mtddev, mtddev, 80, 0400); MODULE_PARM_DESC(mtddev, "name or index number of the MTD device to use"); static int dump_oops = 1; module_param(dump_oops, int, 0600); MODULE_PARM_DESC(dump_oops, "set to 1 to dump oopses, 0 to only dump panics (default 1)"); static unsigned long lkmsg_record_size = 512 * 1024; extern struct raw_notifier_head pwrkey_irq_notifier_list; #define MAX_CMDLINE_PARAM_LEN 256 static char build_fingerprint[MAX_CMDLINE_PARAM_LEN] = {0}; module_param_string(fingerprint, build_fingerprint, MAX_CMDLINE_PARAM_LEN,0644); static int boot_mode = 0; module_param(boot_mode, int, 0600); MODULE_PARM_DESC(boot_mode, "boot_mode (default 0)"); #define MTDOOPS_KERNMSG_MAGIC_v1 0x5d005d00 /* Original */ #define MTDOOPS_KERNMSG_MAGIC_v2 0x5d005e00 /* Adds the timestamp */ #define MTDOOPS_HEADER_SIZE 8 enum mtd_dump_reason { MTD_DUMP_UNDEF, MTD_DUMP_PANIC, MTD_DUMP_OOPS, MTD_DUMP_EMERG, MTD_DUMP_SHUTDOWN, MTD_DUMP_RESTART, MTD_DUMP_POWEROFF, MTD_DUMP_LONG_PRESS, MTD_DUMP_MAX }; static char *kdump_reason[8] = { "Unknown", "Kernel Panic", "Oops!", "Emerg", "Shut Down", "Restart", "PowerOff", "Long Press" }; enum mtdoops_log_type { MTDOOPS_TYPE_UNDEF, MTDOOPS_TYPE_DMESG, MTDOOPS_TYPE_PMSG, }; static char *log_type[4] = { "Unknown", "LAST KMSG", "LAST LOGCAT" }; struct pmsg_buffer_hdr { uint32_t sig; atomic_t start; atomic_t size; uint8_t data[0]; }; struct pmsg_platform_data { unsigned long mem_size; phys_addr_t mem_address; unsigned long console_size; unsigned long pmsg_size; }; struct mtdoops_hdr { u32 seq; u32 magic; ktime_t timestamp; } __packed; static struct mtdoops_context { struct kmsg_dumper dump; struct notifier_block reboot_nb; struct notifier_block pwrkey_long_press_nb; struct pmsg_platform_data pmsg_data; int mtd_index; struct work_struct work_erase; struct work_struct work_write; struct mtd_info *mtd; int oops_pages; int nextpage; int nextcount; unsigned long *oops_page_used; unsigned long oops_buf_busy; void *oops_buf; } oops_cxt; static void mark_page_used(struct mtdoops_context *cxt, int page) { set_bit(page, cxt->oops_page_used); } static void mark_page_unused(struct mtdoops_context *cxt, int page) { clear_bit(page, cxt->oops_page_used); } static int page_is_used(struct mtdoops_context *cxt, int page) { return test_bit(page, cxt->oops_page_used); } static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) { struct mtd_info *mtd = cxt->mtd; u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize; u32 start_page = start_page_offset / record_size; u32 erase_pages = mtd->erasesize / record_size; struct erase_info erase; int ret; int page; 
erase.addr = offset; erase.len = mtd->erasesize; ret = mtd_erase(mtd, &erase); if (ret) { pr_warn("erase of region [0x%llx, 0x%llx] on \"%s\" failed\n", (unsigned long long)erase.addr, (unsigned long long)erase.len, mtddev); return ret; } /* Mark pages as unused */ for (page = start_page; page < start_page + erase_pages; page++) mark_page_unused(cxt, page); return 0; } static void mtdoops_erase(struct mtdoops_context *cxt) { struct mtd_info *mtd = cxt->mtd; int i = 0, j, ret, mod; /* We were unregistered */ if (!mtd) return; mod = (cxt->nextpage * record_size) % mtd->erasesize; if (mod != 0) { cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; } while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) { badblock: pr_warn("bad block at %08lx\n", cxt->nextpage * record_size); i++; cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; if (i == cxt->oops_pages / (mtd->erasesize / record_size)) { pr_err("all blocks bad!\n"); return; } } if (ret < 0) { pr_err("mtd_block_isbad failed, aborting\n"); return; } for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); if (ret >= 0) { pr_debug("ready %d, %d\n", cxt->nextpage, cxt->nextcount); return; } if (ret == -EIO) { ret = mtd_block_markbad(mtd, cxt->nextpage * record_size); if (ret < 0 && ret != -EOPNOTSUPP) { pr_err("block_markbad failed, aborting\n"); return; } } goto badblock; } /* Scheduled work - when we can't proceed without erasing a block */ static void mtdoops_workfunc_erase(struct work_struct *work) { struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_erase); mtdoops_erase(cxt); } static void mtdoops_inc_counter(struct mtdoops_context *cxt, int panic) { cxt->nextpage++; if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; cxt->nextcount++; if (cxt->nextcount == 0xffffffff) cxt->nextcount = 0; if (page_is_used(cxt, cxt->nextpage)) { pr_debug("not ready %d, %d (erase %s)\n", cxt->nextpage, cxt->nextcount, panic ? 
"immediately" : "scheduled"); if (panic) { /* In case of panic, erase immediately */ mtdoops_erase(cxt); } else { /* Otherwise, schedule work to erase it "nicely" */ schedule_work(&cxt->work_erase); } } else { pr_debug("ready %d, %d (no erase)\n", cxt->nextpage, cxt->nextcount); } } static void mtdoops_write(struct mtdoops_context *cxt, int panic) { struct mtd_info *mtd = cxt->mtd; size_t retlen; struct mtdoops_hdr *hdr; int ret; if (test_and_set_bit(0, &cxt->oops_buf_busy)) return; /* Add mtdoops header to the buffer */ hdr = (struct mtdoops_hdr *)cxt->oops_buf; hdr->seq = cxt->nextcount; hdr->magic = MTDOOPS_KERNMSG_MAGIC_v2; hdr->timestamp = ktime_get_real(); if (panic) { ret = mtd_panic_write(mtd, cxt->nextpage * record_size, record_size, &retlen, cxt->oops_buf); if (ret == -EOPNOTSUPP) { pr_err("Cannot write from panic without panic_write\n"); goto out; } } else ret = mtd_write(mtd, cxt->nextpage * record_size, record_size, &retlen, cxt->oops_buf); if (retlen != record_size || ret < 0) pr_err("write failure at %ld (%td of %ld written), error %d\n", cxt->nextpage * record_size, retlen, record_size, ret); mark_page_used(cxt, cxt->nextpage); // memset(cxt->oops_buf, 0xff, record_size); // mtdoops_inc_counter(cxt, panic); out: clear_bit(0, &cxt->oops_buf_busy); } static void mtdoops_workfunc_write(struct work_struct *work) { struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_write); mtdoops_write(cxt, 0); } static void find_next_position(struct mtdoops_context *cxt) { struct mtd_info *mtd = cxt->mtd; struct mtdoops_hdr hdr; int ret, page, maxpos = 0; u32 maxcount = 0xffffffff; size_t retlen; for (page = 0; page < cxt->oops_pages; page++) { if (mtd_block_isbad(mtd, page * record_size)) continue; /* Assume the page is used */ mark_page_used(cxt, page); ret = mtd_read(mtd, page * record_size, sizeof(hdr), &retlen, (u_char *)&hdr); if (retlen != sizeof(hdr) || (ret < 0 && !mtd_is_bitflip(ret))) { pr_err("read failure at %ld (%zu of %zu read), err %d\n", page * record_size, retlen, sizeof(hdr), ret); continue; } if (hdr.seq == 0xffffffff && hdr.magic == 0xffffffff) mark_page_unused(cxt, page); if (hdr.seq == 0xffffffff || (hdr.magic != MTDOOPS_KERNMSG_MAGIC_v1 && hdr.magic != MTDOOPS_KERNMSG_MAGIC_v2)) continue; if (maxcount == 0xffffffff) { maxcount = hdr.seq; maxpos = page; } else if (hdr.seq < 0x40000000 && maxcount > 0xc0000000) { maxcount = hdr.seq; maxpos = page; } else if (hdr.seq > maxcount && hdr.seq < 0xc0000000) { maxcount = hdr.seq; maxpos = page; } else if (hdr.seq > maxcount && hdr.seq > 0xc0000000 && maxcount > 0x80000000) { maxcount = hdr.seq; maxpos = page; } } if (maxcount == 0xffffffff) { cxt->nextpage = cxt->oops_pages - 1; cxt->nextcount = 0; } else { cxt->nextpage = maxpos; cxt->nextcount = maxcount; } mtdoops_inc_counter(cxt, 0); } static void mtdoops_add_reason(char *oops_buf, int reason, enum mtdoops_log_type type, int index, int nextpage) { char str_buf[512] = {0}; int ret_len = 0; struct timespec64 now; struct tm ts; char temp_buf[32] = {0}; int temp_len = 0; char BootMode[20] = {0}; unsigned long local_time; ktime_get_coarse_real_ts64(&now); /*set title time to UTC+8*/ local_time = (unsigned long)(now.tv_sec + 8 * 60 * 60); time64_to_tm(local_time, 0, &ts); if (boot_mode == 0) { strcpy(BootMode, "normal"); } else if (boot_mode == 1) { strcpy(BootMode, "recovery"); } else if (boot_mode == 2) { strcpy(BootMode, "poweroff_charger"); } temp_len = snprintf(temp_buf, 32,"\n ---mtdoops report start--- \n"); memcpy(oops_buf, temp_buf, temp_len); 
ret_len = snprintf(str_buf, 200, "\n```\n## Oops_Index: %d\n### Build: %s\n## REASON: %s\n#### LOG TYPE:%s\n## BOOT MODE:%s\n##### %04ld-%02d-%02d %02d:%02d:%02d\n```c\n", index, build_fingerprint, kdump_reason[reason], log_type[type], BootMode, ts.tm_year+1900, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec); if(ret_len >= sizeof(str_buf)) ret_len = sizeof(str_buf); memcpy(oops_buf + temp_len, str_buf, ret_len); } static void mtdoops_add_pmsg_head(char *oops_buf, enum mtdoops_log_type type) { char str_buf[80] = {0}; int ret_len = 0; struct timespec64 now; struct tm ts; unsigned long local_time; ktime_get_coarse_real_ts64(&now); local_time = (unsigned long)(now.tv_sec + 8 * 60 * 60); time64_to_tm(local_time, 0, &ts); ret_len = snprintf(str_buf, 80, "\n```\n#### LOG TYPE:%s\n#####%04ld-%02d-%02d %02d:%02d:%02d\n```\n", log_type[type], ts.tm_year + 1900, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec); memcpy(oops_buf, str_buf, ret_len); } static void mtdoops_do_dump(struct kmsg_dumper *dumper, enum mtd_dump_reason reason) { struct mtdoops_context *cxt = container_of(dumper, struct mtdoops_context, dump); struct kmsg_dump_iter iter; size_t ret_len = 0; void *pmsg_buffer_start = NULL; struct pmsg_buffer_hdr *p_hdr = NULL; int j = 0; int ret = 0; static int do_dump_count = 0; if(cxt->mtd == NULL) return; if(reason == KMSG_DUMP_SHUTDOWN || reason == KMSG_DUMP_EMERG) return; /* Only dump oopses if dump_oops is set */ if (reason == KMSG_DUMP_OOPS && !dump_oops) return; do_dump_count++; pr_err("%s start , count = %d , page = %d, reason = %d, dump_count = %d\n", __func__, cxt->nextcount, cxt->nextpage, reason, do_dump_count); if(do_dump_count>1) { for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); } kmsg_dump_rewind(&iter); if (test_and_set_bit(0, &cxt->oops_buf_busy)) return; kmsg_dump_get_buffer(&iter, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, lkmsg_record_size - MTDOOPS_HEADER_SIZE, &ret_len); clear_bit(0, &cxt->oops_buf_busy); mtdoops_add_reason(cxt->oops_buf + MTDOOPS_HEADER_SIZE, reason, MTDOOPS_TYPE_DMESG, cxt->nextcount, cxt->nextpage); pmsg_buffer_start = ioremap( ((cxt->pmsg_data.mem_address + cxt->pmsg_data.mem_size)- cxt->pmsg_data.pmsg_size), cxt->pmsg_data.mem_size); if (!device_base) { printk(KERN_ERR "ioremap failed!\n"); } p_hdr = (struct pmsg_buffer_hdr *)pmsg_buffer_start; pr_err("mtdoops_do_dump pmsg paddr = 0x%p \n", pmsg_buffer_start); if(p_hdr->sig == 0x43474244) { void *oopsbuf = cxt->oops_buf + (MTDOOPS_HEADER_SIZE + ret_len); uint8_t *p_buff_end = (uint8_t *)p_hdr->data + atomic_read(&p_hdr->size); int pmsg_cp_size = 0; int pstart = p_hdr->start.counter; int psize = p_hdr->size.counter; pmsg_cp_size = (record_size - (ret_len + MTDOOPS_HEADER_SIZE)); if (psize <= pmsg_cp_size) pmsg_cp_size = psize; if (pstart >= pmsg_cp_size) { memcpy(oopsbuf, p_hdr->data, pmsg_cp_size); } else { memcpy(oopsbuf, p_buff_end - (pmsg_cp_size - pstart), pmsg_cp_size - pstart); memcpy(oopsbuf + (pmsg_cp_size - pstart), p_hdr->data, pstart); } mtdoops_add_pmsg_head(cxt->oops_buf + (MTDOOPS_HEADER_SIZE + ret_len), MTDOOPS_TYPE_PMSG); } else pr_err("mtdoops: read pmsg failed sig = 0x%x \n", p_hdr->sig); if (reason == KMSG_DUMP_OOPS || reason == KMSG_DUMP_PANIC) { /* Panics must be written immediately */ mtdoops_write(cxt, 1); } else { /*we should write log immediately , if use work to write, *ufs will shutdown before write log finish */ mtdoops_write(cxt, 0); } pr_err("mtdoops_do_dump() finish \n"); } static int 
mtdoops_reboot_nb_handle(struct notifier_block *this, unsigned long event, void *ptr) { enum mtd_dump_reason reason; struct mtdoops_context *cxt = &oops_cxt; if (event == SYS_RESTART) reason = MTD_DUMP_RESTART; else if(event == SYS_POWER_OFF) reason = MTD_DUMP_POWEROFF; else return NOTIFY_OK; mtdoops_do_dump(&cxt->dump, reason); return NOTIFY_OK; } static int pwrkey_long_press_irq_event(struct notifier_block *this, unsigned long event, void *ptr) { struct mtdoops_context *cxt = &oops_cxt; mtdoops_do_dump(&cxt->dump, MTD_DUMP_LONG_PRESS); return NOTIFY_DONE; } static void mtdoops_do_null(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) { return; } static void mtdoops_notify_add(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; u64 mtdoops_pages = div_u64(mtd->size, record_size); int err; if (!strcmp(mtd->name, mtddev)) cxt->mtd_index = mtd->index; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; if (mtd->size < mtd->erasesize * 2) { pr_err("MTD partition %d not big enough for mtdoops\n", mtd->index); return; } if (mtd->erasesize < record_size) { pr_err("eraseblock size of MTD partition %d too small\n", mtd->index); return; } if (mtd->size > MTDOOPS_MAX_MTD_SIZE) { pr_err("mtd%d is too large (limit is %d MiB)\n", mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024); return; } /* oops_page_used is a bit field */ cxt->oops_page_used = vmalloc(array_size(sizeof(unsigned long), DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG))); if (!cxt->oops_page_used) { pr_err("could not allocate page array\n"); return; } cxt->dump.max_reason = KMSG_DUMP_MAX; cxt->dump.dump = mtdoops_do_null; err = kmsg_dump_register(&cxt->dump); if (err) { pr_err("registering kmsg dumper failed, error %d\n", err); vfree(cxt->oops_page_used); cxt->oops_page_used = NULL; return; } /*for restart and power off*/ cxt->reboot_nb.notifier_call = mtdoops_reboot_nb_handle; cxt->reboot_nb.priority = 255; register_reboot_notifier(&cxt->reboot_nb); cxt->pwrkey_long_press_nb.notifier_call = pwrkey_long_press_irq_event; cxt->pwrkey_long_press_nb.priority = 255; raw_notifier_chain_register(&pwrkey_irq_notifier_list, &cxt->pwrkey_long_press_nb); cxt->mtd = mtd; cxt->oops_pages = (int)mtd->size / record_size; find_next_position(cxt); pr_info("Attached to MTD device %d\n", mtd->index); } static void mtdoops_notify_remove(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; if (kmsg_dump_unregister(&cxt->dump) < 0) pr_warn("could not unregister kmsg_dumper\n"); unregister_reboot_notifier(&cxt->reboot_nb); cxt->mtd = NULL; flush_work(&cxt->work_erase); flush_work(&cxt->work_write); } static struct mtd_notifier mtdoops_notifier = { .add = mtdoops_notify_add, .remove = mtdoops_notify_remove, }; static int mtdoops_parse_dt_u32(struct platform_device *pdev, const char *propname, u32 default_value, u32 *value) { u32 val32 = 0; int ret; ret = of_property_read_u32(pdev->dev.of_node, propname, &val32); if (ret == -EINVAL) { /* field is missing, use default value. */ val32 = default_value; } else if (ret < 0) { pr_err("failed to parse property %s: %d\n", propname, ret); return ret; } /* Sanity check our results. 
*/ if (val32 > INT_MAX) { pr_err("%s %u > INT_MAX\n", propname, val32); return -EOVERFLOW; } *value = val32; return 0; } static int mtdoops_pmsg_probe(struct platform_device *pdev) { struct mtdoops_context *cxt = &oops_cxt; struct resource *res; u32 value; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { pr_err("failed to locate DT /reserved-memory resource\n"); return -EINVAL; } cxt->pmsg_data.mem_size = resource_size(res); cxt->pmsg_data.mem_address = res->start; #define parse_u32(name, field, default_value) { \ ret = mtdoops_parse_dt_u32(pdev, name, default_value, \ &value); \ if (ret < 0) \ return ret; \ field = value; \ } parse_u32("console-size", cxt->pmsg_data.console_size, 0); parse_u32("pmsg-size", cxt->pmsg_data.pmsg_size, 0); #undef parse_u32 pr_err( "pares mtd_dt, mem_address =0x%llx, mem_size =0x%lx \n", cxt->pmsg_data.mem_address, cxt->pmsg_data.mem_size); pr_err( "pares mtd_dt, pmsg_size =0x%lx, console-size =0x%lx \n", cxt->pmsg_data.pmsg_size, cxt->pmsg_data.console_size); return 0; } static const struct of_device_id dt_match[] = { { .compatible = "xiaomi,mtdoops_pmsg" }, {} }; static struct platform_driver mtdoops_pmsg_driver = { .probe = mtdoops_pmsg_probe, .driver = { .name = "mtdoops_pmsg", .of_match_table = dt_match, }, }; static int __init mtdoops_init(void) { struct mtdoops_context *cxt = &oops_cxt; int mtd_index; char *endp; if (strlen(mtddev) == 0) { pr_err("mtd device (mtddev=name/number) must be supplied\n"); return -EINVAL; } if ((record_size & 4095) != 0) { pr_err("record_size must be a multiple of 4096\n"); return -EINVAL; } if (record_size < 4096) { pr_err("record_size must be over 4096 bytes\n"); return -EINVAL; } /* Setup the MTD device to use */ cxt->mtd_index = -1; mtd_index = simple_strtoul(mtddev, &endp, 0); if (*endp == '\0') cxt->mtd_index = mtd_index; cxt->oops_buf = kmalloc(record_size, GFP_KERNEL); if (!cxt->oops_buf) return -ENOMEM; memset(cxt->oops_buf, 0xff, record_size); cxt->oops_buf_busy = 0; INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); platform_driver_register(&mtdoops_pmsg_driver); register_mtd_user(&mtdoops_notifier); return 0; } static void __exit mtdoops_exit(void) { struct mtdoops_context *cxt = &oops_cxt; unregister_mtd_user(&mtdoops_notifier); kfree(cxt->oops_buf); vfree(cxt->oops_page_used); } module_init(mtdoops_init); module_exit(mtdoops_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>"); MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver"); 问题堆栈对应mtdoops.c文件上传,怎么修复这个问题
10-30
extern int tcp_v4_rcv(struct sk_buff *skb); #if IS_ENABLED(CONFIG_IPV6) /*even though we are checking IPV6 for both Built-in and module, * but when built as module this will casue build error as * BLOG_LOCALIN_TCP is always Built-in */ extern int tcp_v6_rcv(struct sk_buff *skb); #else int tcp_v6_rcv(struct sk_buff *skb) { /* when IPv6 is not enabled we dont expect any packets here */ BUG(); } #endif static inline struct sk_buff *bcm_find_skb_by_flowid(uint32_t flowid) { /* TODO add this function later,needed for coalescing */ return NULL; } static inline int set_skb_fields(struct sk_buff *skb, BlogFcArgs_t *fc_args) { struct net_device *dev_p; struct dst_entry *dst_p; dev_p = bcm_get_netdev_by_id_nohold(fc_args->local_rx_devid); dst_p = blog_get_dstentry_by_id(fc_args->dst_entry_id); if(!dev_p || !dst_p) return -1; skb->dev = dev_p; skb_dst_set(skb, dst_p); skb->skb_iif = dev_p->ifindex; return 0; } static inline void position_skb_ptrs_to_transport(struct sk_buff *skb, BlogFcArgs_t *fc_args) { /*initialize ip & tcp header related fields in skb */ skb_set_mac_header(skb, 0); skb_set_network_header(skb, fc_args->tx_l3_offset); skb_set_transport_header(skb, fc_args->tx_l4_offset); /*position data pointer to start of TCP hdr */ skb_pull(skb,fc_args->tx_l4_offset); skb->pkt_type = PACKET_HOST; return; } static inline struct sk_buff * __bcm_tcp_prep_skb(pNBuff_t pNBuff, BlogFcArgs_t *fc_args) { struct sk_buff *skb; if(IS_FKBUFF_PTR(pNBuff)) { FkBuff_t *fkb = PNBUFF_2_FKBUFF(pNBuff); /* Translate the fkb to skb */ /* find the skb for flowid or allocate a new skb */ skb = bcm_find_skb_by_flowid(fkb->flowid); if(!skb) { skb = skb_xlate_dp(fkb, NULL); if(!skb) { goto fail; } } skb->mark=0; skb->priority=0; } else { skb = PNBUFF_2_SKBUFF(pNBuff); /* Remove any debris in the socket control block * used by IPCB,IP6CB and TCP_SKB_CB * note: not needed for fkb's as entire skb is cleared in skb_xlate_dp above */ memset(skb->cb, 0, sizeof(skb->cb)); } if (unlikely(set_skb_fields(skb, fc_args) != 0)) goto fail; position_skb_ptrs_to_transport(skb, fc_args); return skb; fail: if (skb) dev_kfree_skb_any(skb); else nbuff_free(pNBuff); return NULL; } struct sk_buff * bcm_tcp_prep_skb(pNBuff_t pNBuff, BlogFcArgs_t *fc_args) { return __bcm_tcp_prep_skb(pNBuff, fc_args); } EXPORT_SYMBOL(bcm_tcp_prep_skb); #ifdef BCM_TCP_V4_TASK static int g_bcm_tcp_task_en = 1; typedef struct { spinlock_t lock; struct sk_buff_head input_q; struct sk_buff_head process_q; struct task_struct *task; wait_queue_head_t thread_wqh; int work_avail; } bcm_tcp_queue_t; typedef struct { struct sk_buff *skb_p; } bcm_tcp_item_t; #define TCP_RCVTSK_LOCK(_c) spin_lock_bh(&((_c)->lock)) #define TCP_RCVTSK_UNLK(_c) spin_unlock_bh(&((_c)->lock)) #define MAX_BCM_TCP_INPUT_LEN (1024) #define MAX_BCM_TCP_BUDGET (256) static bcm_tcp_queue_t bcm_tcp_async_q; #define WAKEUP_BCM_TCP_TASK() do { \ wake_up_interruptible(&(bcm_tcp_async_q.thread_wqh)); \ } while (0) static inline void __bcm_tcp_enqueue(struct sk_buff *skb) { if (skb) { bcm_tcp_queue_t *async_q = (bcm_tcp_queue_t *) (&bcm_tcp_async_q); TCP_RCVTSK_LOCK(async_q); if(likely(skb_queue_len(&(async_q->input_q))< MAX_BCM_TCP_INPUT_LEN )) { skb_queue_tail(&(async_q->input_q),skb); skb = NULL; if(!(async_q->work_avail)) { async_q->work_avail = 1; WAKEUP_BCM_TCP_TASK(); } } TCP_RCVTSK_UNLK(async_q); } if(skb) __kfree_skb(skb); } void bcm_tcp_enqueue(struct sk_buff *skb) { __bcm_tcp_enqueue(skb); } EXPORT_SYMBOL(bcm_tcp_enqueue); /* inject the packet into ipv4_tcp_stack directly from the network driver 
*/ static inline int bcm_tcp_v4_recv_queue(pNBuff_t pNBuff, struct net_device *txdev, BlogFcArgs_t *fc_args) { struct sk_buff *skb; skb = __bcm_tcp_prep_skb(pNBuff, fc_args); if(skb) { skb->protocol = htons(ETH_P_IP); if(g_bcm_tcp_task_en) /*hand over pkt to bcm_tcp_task()*/ __bcm_tcp_enqueue(skb); else { /* * bh_disable is needed to prevent deadlock on sock_lock when TCP timers * are executed */ local_bh_disable(); tcp_v4_rcv(skb); local_bh_enable(); } } return 0; } /* inject the packet into ipv6_tcp_stack directly from the network driver */ static inline int bcm_tcp_v6_recv_queue(pNBuff_t pNBuff, struct net_device *txdev, BlogFcArgs_t *fc_args) { struct sk_buff *skb; skb = __bcm_tcp_prep_skb(pNBuff, fc_args); if(skb) { skb->protocol = htons(ETH_P_IPV6); IP6CB(skb)->iif = skb->dev->ifindex; IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); if(g_bcm_tcp_task_en) /*hand over pkt to bcm_tcp_task()*/ __bcm_tcp_enqueue(skb); else { /* * bh_disable is needed to prevent deadlock on sock_lock when TCP timers * are executed */ local_bh_disable(); tcp_v6_rcv(skb); local_bh_enable(); } } return 0; } static int bcm_tcp_recv_thread_func(void *thread_data) { unsigned int budget; struct sk_buff *skb; bcm_tcp_queue_t *async_q = NULL; async_q = (bcm_tcp_queue_t *) thread_data; while (1) { wait_event_interruptible( (async_q->thread_wqh), kthread_should_stop() || (async_q->work_avail) ); if (kthread_should_stop()) { printk(KERN_INFO "kthread_should_stop detected in wfd\n"); break; } budget = MAX_BCM_TCP_BUDGET; if(skb_queue_len(&(async_q->process_q))<= MAX_BCM_TCP_BUDGET) { TCP_RCVTSK_LOCK(async_q); if(!skb_queue_empty(&(async_q->input_q))) { skb_queue_splice_tail_init(&(async_q->input_q),&(async_q->process_q)); } TCP_RCVTSK_UNLK(async_q); } /* * bh_disable is needed to prevent deadlock on sock_lock when TCP timers * are executed */ local_bh_disable(); while(likely(budget-- && (skb = __skb_dequeue(&(async_q->process_q))) )) { if(skb->protocol == htons(ETH_P_IPV6)) tcp_v6_rcv(skb); else tcp_v4_rcv(skb); } local_bh_enable(); async_q->work_avail = (!skb_queue_empty(&(async_q->process_q))) ? 1 : 0; // No more work in process queue , double check input queue. 
if(!async_q->work_avail) { TCP_RCVTSK_LOCK(async_q); if(!skb_queue_empty(&(async_q->input_q))) { async_q->work_avail = 1; } TCP_RCVTSK_UNLK(async_q); } /* we still have packets in Q, reschedule the task */ if (async_q->work_avail){ schedule(); } } return 0; } struct task_struct *create_bcm_tcp_task(bcm_tcp_queue_t *async_q) { struct task_struct *tsk; int cpu_num = num_online_cpus(); unsigned int bind_mask = 0x00; spin_lock_init(&async_q->lock); async_q->work_avail = 0; init_waitqueue_head(&(async_q->thread_wqh)); skb_queue_head_init(&(async_q->input_q)); skb_queue_head_init(&(async_q->process_q)); tsk = kthread_create(bcm_tcp_recv_thread_func, async_q, "bcm_tcp_task"); if (IS_ERR(tsk)) { printk(KERN_EMERG "bcm_tcp_task creation failed\n"); return NULL; } async_q->task = tsk; //AFFINITY with non-1st (wl0) and Non-last (Archer) CORE if(cpu_num>2) { struct cpumask aff_mask; int cpuid; cpumask_clear(&aff_mask); for(cpuid = 1; cpuid<=cpu_num ;cpuid++) { if(cpuid != 1 && cpuid != cpu_num) { cpumask_or(&aff_mask,&aff_mask,(cpumask_of(cpuid-1))); bind_mask |= (1<<(cpuid-1)); } } printk(" %s:%d bind_mask:0x%X\n",__FUNCTION__,__LINE__,bind_mask); set_cpus_allowed_ptr(async_q->task,&aff_mask); } wake_up_process(tsk); printk(KERN_EMERG "bcm_tcp_task created successfully with budget %d ,cpumask:0x%X\n", MAX_BCM_TCP_BUDGET,bind_mask); return tsk; } static struct proc_dir_entry *proc_bcm_tcp_recv_dir = NULL; /* /proc/bcm_tcp_recv */ static struct proc_dir_entry *proc_bcm_tcp_recv_ops_file = NULL; /* /proc/bcm_tcp_recv/operate */ static ssize_t bcm_tcp_recv_file_write(struct file *file, const char __user *buf, size_t cnt, loff_t *ppos); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,20,0)) static const struct proc_ops bcm_tcp_recv_fops = { .proc_write = bcm_tcp_recv_file_write, }; #else static const struct file_operations bcm_tcp_recv_fops = { .owner = THIS_MODULE, .write = bcm_tcp_recv_file_write, }; #endif static ssize_t bcm_tcp_recv_file_write(struct file *file, const char __user *buf, size_t cnt, loff_t *ppos) { char input[64]=""; char ACT=' '; if(cnt < 1) return -EFAULT; if (cnt > 64) cnt = 64; if (copy_from_user(input, buf, cnt) != 0) return -EFAULT; input[cnt-1] = '\0'; /* Command format : Enable : 1 Disable : 0 */ ACT = input[0]; switch(ACT) { case '1': g_bcm_tcp_task_en= 1 ; printk("g_bcm_tcp_task_en:%d \n",g_bcm_tcp_task_en); break; case '0': g_bcm_tcp_task_en= 0; printk("g_bcm_tcp_task_en:%d \n",g_bcm_tcp_task_en); break; default: printk("g_bcm_tcp_task_en:%d \n",g_bcm_tcp_task_en); break; } return cnt; } /** * ----------------------------------------------------------------------------- * Function : initialize the proc entry * ----------------------------------------------------------------------------- */ int bcm_tcp_recv_proc_init(void) { if (!(proc_bcm_tcp_recv_dir = proc_mkdir("bcm_tcp_recv_task", NULL))) goto fail; if (!(proc_bcm_tcp_recv_ops_file = proc_create("bcm_tcp_recv_task/operate", 0644, NULL, &bcm_tcp_recv_fops))) goto fail; return 0; fail: printk("%s %s: Failed to create proc /bcm_tcp_recv_task\n", __FILE__, __FUNCTION__); remove_proc_entry("bcm_tcp_recv_task" ,NULL); return (-1); } EXPORT_SYMBOL(bcm_tcp_recv_proc_init); /** * ----------------------------------------------------------------------------- * Function : initialize the proc entry * ----------------------------------------------------------------------------- */ void bcm_tcp_recv_proc_fini(void) { remove_proc_entry("operate", proc_bcm_tcp_recv_dir); remove_proc_entry("bcm_tcp_recv", NULL); } 
EXPORT_SYMBOL(bcm_tcp_recv_proc_fini); static int __init bcm_tcp_init(void) { bcm_tcp_async_q.task = create_bcm_tcp_task(&bcm_tcp_async_q); if(bcm_tcp_async_q.task == NULL) BUG(); else { bcm_tcp_recv_proc_init(); } return 0; } subsys_initcall(bcm_tcp_init); #endif /* inject the packet into ipv4_tcp_stack directly from the network driver */ static inline int bcm_tcp_v4_recv(pNBuff_t pNBuff, struct net_device *txdev, BlogFcArgs_t *fc_args) { struct sk_buff *skb; skb = __bcm_tcp_prep_skb(pNBuff, fc_args); if (skb) { skb->protocol = htons(ETH_P_IP); /* * bh_disable is needed to prevent deadlock on sock_lock when TCP timers * are executed */ local_bh_disable(); tcp_v4_rcv(skb); local_bh_enable(); } return 0; } /* inject the packet into ipv6_tcp_stack directly from the network driver */ static inline int bcm_tcp_v6_recv(pNBuff_t pNBuff, struct net_device *txdev, BlogFcArgs_t *fc_args) { struct sk_buff *skb; skb = __bcm_tcp_prep_skb(pNBuff, fc_args); if (skb) { skb->protocol = htons(ETH_P_IPV6); /* always use ifindex of skb->dev as skb_dst can be set in tcp_v6_early_demux * and it's possible skb_dst is different from skb->dev, when Src IP used * for creating socket/route is not part of the outgoing interface */ IP6CB(skb)->iif = skb->dev->ifindex; IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); /*TODO check if we need to consider any IPV6 options */ /* * bh_disable is needed to prevent deadlock on sock_lock when TCP timers * are executed */ local_bh_disable(); tcp_v6_rcv(skb); local_bh_enable(); } return 0; } static int bcm_tcp_recv(pNBuff_t pNBuff, struct net_device *txdev) { /* The expectation is that this dev_hard_xmit() function will never be called. Instead the function with args parameter (i.e. bcm_tcp_recv_args) would be invoked */ BUG(); return 0; } int bcm_tcp_recv_args(pNBuff_t pNBuff, struct net_device *txdev, BlogFcArgs_t *fc_args) { if (fc_args->tx_is_ipv4) { if (fc_args->use_tcplocal_xmit_enq_fn) { return bcm_tcp_v4_recv_queue(pNBuff, txdev, fc_args); } else { return bcm_tcp_v4_recv(pNBuff, txdev, fc_args); } } else { if (fc_args->use_tcplocal_xmit_enq_fn) { return bcm_tcp_v6_recv_queue(pNBuff, txdev, fc_args); } else { return bcm_tcp_v6_recv(pNBuff, txdev, fc_args); } } return 0; } static const struct net_device_ops bcm_tcp_netdev_ops = { .ndo_open = NULL, .ndo_stop = NULL, .ndo_start_xmit = (HardStartXmitFuncP)bcm_tcp_recv, .ndo_set_mac_address = NULL, .ndo_do_ioctl = NULL, .ndo_tx_timeout = NULL, .ndo_get_stats = NULL, .ndo_change_mtu = NULL }; struct net_device *blogtcp_local_dev=NULL; static void bcm_blogtcp_dev_setup(struct net_device *dev) { dev->type = ARPHRD_RAWIP; dev->mtu = BCM_MAX_MTU_PAYLOAD_SIZE; dev->netdev_ops = &bcm_tcp_netdev_ops; bcm_netdev_ext_field_set(dev, blog_stats_flags, BLOG_DEV_STAT_FLAG_INCLUDE_ALL); bcm_netdev_ext_field_set(dev, dev_xmit_args, bcm_tcp_recv_args); netdev_accel_tx_fkb_set(dev); } void bcm_tcp_register_netdev(void) { int ret; blogtcp_local_dev = alloc_netdev(0, "blogtcp_local", NET_NAME_UNKNOWN, bcm_blogtcp_dev_setup); if ( blogtcp_local_dev ) { ret = register_netdev(blogtcp_local_dev); if (ret) { printk(KERN_ERR "blogtcp_local register_netdev failed\n"); free_netdev(blogtcp_local_dev); blogtcp_local_dev = NULL; } else printk("blogtcp_local netdev registered successfully \n"); } } inline static int encap_offset(struct sk_buff *skb, uint32_t * encap) { /*start from innermost IP always */ int offset = skb->transport_header - skb->network_header; *encap = TYPE_IP; return offset; } int bcm_tcp_blog_emit(struct sk_buff *skb, struct sock 
*sk) { if(skb->blog_p && skb->blog_p->l2_mode) { blog_skip(skb,blog_skip_reason_l2_local_termination); } else if( (sk && sk->sk_state == TCP_ESTABLISHED) && skb->blog_p && (skb->blog_p->rx.info.bmap.ETH_802x == 1)) { struct net_device *tmpdev; uint32_t encap ; int offset = encap_offset(skb, &encap); if(skb->dev == NULL) { /* Error */ return -1; } skb_push(skb,offset); tmpdev = skb->dev; skb->dev = blogtcp_local_dev; skb->blog_p->local_rx_devid = bcm_netdev_ext_field_get(tmpdev, devid); skb->blog_p->use_xmit_args = 1; #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) { struct tcp_sock *tp = tcp_sk(sk); if(tp->tcp_discard) { skb->blog_p->tcp_discard = 1; skb->blog_p->fro = 1; } } #endif skb->blog_p->local_tcp = 1; skb->blog_p->hw_cso = 1; if (is_netdev_accel_gdx_tx(blogtcp_local_dev)) { blog_emit_generic(skb, blogtcp_local_dev, BLOG_GENPHY); } else { blog_emit(skb, blogtcp_local_dev, encap, 0, BLOG_TCP_LOCALPHY); } skb->dev = tmpdev; skb_pull(skb,offset); } else{ /*unsupported local tcp */ blog_skip(skb, blog_skip_reason_local_tcp_termination); } return 0; } extern int bcmnet_configure_gdx_accel(struct net_device *dev, bcmnet_accel_t *accel_p); static int bcm_tcp_module_load_notify(struct notifier_block *self, unsigned long val, void *data) { bcmnet_accel_t accel={}; int bp3_htoa_license; if (!strcmp("gdx", ((struct module *)data)->name)) { bp3_htoa_license = bcm_license_check(BP3_FEATURE_HTOA); if (bp3_htoa_license <= 0) { /* No valid htoa license. Do not enable GDX */ printk("%s: ***No valid HTOA license. Do not enable GDX for local tcp acceleration***\n", __func__); return 0; } printk("%s: ***HTOA license present. Enable GDX for local tcp acceleration***\n", __func__); switch (val) { case MODULE_STATE_LIVE: #if defined(CONFIG_BCM_GDX_HW) accel.gdx_hw = 1; #endif accel.gdx_tx = 1; bcmnet_configure_gdx_accel(blogtcp_local_dev, &accel); break; case MODULE_STATE_GOING: #if defined(CONFIG_BCM_GDX_HW) accel.gdx_hw = 0; #endif accel.gdx_tx = 0; bcmnet_configure_gdx_accel(blogtcp_local_dev, &accel); break; default: return 0; } } return 0; } static struct notifier_block bcm_tcp_module_load_nb = { .notifier_call = bcm_tcp_module_load_notify, }; static int __init bcm_tcp_accel_init(void) { bcm_tcp_register_netdev(); register_module_notifier(&bcm_tcp_module_load_nb); return 0; } fs_initcall(bcm_tcp_accel_init);继续结合分析localin加速 并分析如何针对FTP报文关闭Localin加速
最新发布
11-18
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2021, Intel Corporation. #include <linux/bitfield.h> #include <linux/crc8.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/poll.h> #include <linux/regmap.h> #include <uapi/linux/aspeed-espi-mmbi.h> #include <dt-bindings/mmbi/protocols.h> #define DEVICE_NAME "mmbi" #define MAX_NO_OF_SUPPORTED_CHANNELS 8 #define MAX_NO_OF_SUPPORTED_PROTOCOLS 5 /* 20 Bits for H2B/B2H Write/Read Pointers */ #define H2B_WRITE_POINTER_MASK GENMASK(19, 0) #define B2H_READ_POINTER_MASK GENMASK(19, 0) #define MMBI_HDR_LENGTH_MASK GENMASK(23, 0) #define MMBI_HDR_TYPE_MASK GENMASK(31, 24) #define HOST_RESET_REQUEST_BIT BIT(31) #define HOST_READY_BIT BIT(31) #define ESPI_SCI_STATUS_BIT BIT(24) #define GET_H2B_WRITE_POINTER(x) ((x) & H2B_WRITE_POINTER_MASK) #define GET_B2H_READ_POINTER(x) ((x) & B2H_READ_POINTER_MASK) #define GET_HOST_RESET_REQ_BIT(x) ((x) & HOST_RESET_REQUEST_BIT) #define GET_HOST_READY_BIT(x) ((x) & HOST_READY_BIT) #define HOST_READ_SCI_STATUS_BIT(x) ((x) & ESPI_SCI_STATUS_BIT) #define MMBI_CRC8_POLYNOMIAL 0x07 DECLARE_CRC8_TABLE(mmbi_crc8_table); typedef u8 protocol_type; struct host_rop { unsigned int b2h_wp : 20; /* Offset where BMC can write next data in B2H */ unsigned int reserved1 : 11; unsigned int b_rdy : 1; /* BMC ready bit */ unsigned int h2b_rp : 20; /* Offset till where bmc read data in H2B */ unsigned int reserved2 : 11; unsigned int b_rst : 1; /* BMC reset request bit */ }; struct host_rwp { unsigned int h2b_wp : 20; /* Offset where HOST can write next data in H2B */ unsigned int reserved1 : 11; unsigned int h_rdy : 1; /* Host ready bit */ unsigned int b2h_rp : 20; /* Offset till where host read data in B2H */ unsigned int reserved2 : 11; unsigned int h_rst : 1; /* host reset request bit */ }; struct buffer_type_desc { u32 host_rop_p; u32 host_rwp_p; u8 msg_protocol_type; u8 host_int_type; u16 global_sys_interrupt; u8 bmc_int_type; u32 bmc_int_a; u8 bmc_int_v; } __packed; struct mmbi_cap_desc { u8 signature[6]; u8 version; u8 instance_num; u32 nex_inst_base_addr; u32 b2h_ba; /* B2H buffer base offset (i.e 0x48) */ u32 h2b_ba; /* H2B buffer base offset (i.e 0x08) */ u16 b2h_d; /* Multiple of 16 Bytes (Max 1MB) */ u16 h2b_d; /* multiples of 16 bytes (Max 1MB) */ u8 buffer_type; /* Type of buffer in B2H/H2B */ u8 reserved1[7]; struct buffer_type_desc bt_desc; /* 18 bytes */ u8 reserved2[13]; u8 crc8; /* CRC-8-CCITT of the whole data structure (bytes 0 to 62) */ } __packed; struct mmbi_header { u32 data; }; struct aspeed_mmbi_protocol { struct miscdevice miscdev; struct aspeed_mmbi_channel *chan_ref; protocol_type type; bool data_available; /* * If user space application is opened for read, then only process * the data and copy to userspace. 
Otherwise, discard the command and * process the remaining commands (can be different protocol type) */ bool process_data; wait_queue_head_t queue; }; struct aspeed_mmbi_channel { struct aspeed_mmbi_protocol protocol[MAX_NO_OF_SUPPORTED_PROTOCOLS]; struct aspeed_espi_mmbi *priv; u8 chan_num; u8 supported_protocols[MAX_NO_OF_SUPPORTED_PROTOCOLS]; u32 b2h_cb_size; u32 h2b_cb_size; u8 *desc_vmem; u8 *hrop_vmem; u8 *b2h_cb_vmem; u8 *hrwp_vmem; u8 *h2b_cb_vmem; bool enabled; }; struct aspeed_espi_mmbi { struct regmap *map; struct regmap *pmap; struct regmap *lpc_map; struct device *dev; int irq; phys_addr_t host_map_addr; dma_addr_t mmbi_phys_addr; resource_size_t mmbi_size; u8 *dma_vaddr; struct aspeed_mmbi_channel chan[MAX_NO_OF_SUPPORTED_CHANNELS]; }; static const struct regmap_config aspeed_espi_mmbi_regmap_cfg = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .max_register = 0x04C, }; static void raise_sci_interrupt(struct aspeed_mmbi_channel *channel) { u32 val; int retry; struct regmap *lpc_regmap = channel->priv->lpc_map; dev_dbg(channel->priv->dev, "Raising SCI interrupt...\n"); regmap_write_bits(lpc_regmap, AST_LPC_ACPIB7B4, LPC_BMC_TRIG_SCI_EVT_EN, LPC_BMC_TRIG_SCI_EVT_EN); regmap_write_bits(lpc_regmap, AST_LPC_SWCR0704, LPC_BMC_TRIG_WAKEUP_EVT_EN, LPC_BMC_TRIG_WAKEUP_EVT_EN); regmap_write_bits(lpc_regmap, AST_LPC_SWCR0B08, LPC_BMC_TRIG_WAKEUP_EVT, LPC_BMC_TRIG_WAKEUP_EVT); /* * Just asserting the SCI VW will trigger the SCI event continuosly. * So BMC must deassert SCI VW to avoid it. * ESPI098[24] reading will confirm Host read data or not. * - 0 means host read the data * - 1 means host not yet read data, so retry with 1us delay. */ retry = 30; while (retry) { if (regmap_read(channel->priv->pmap, ASPEED_ESPI_SYS_EVENT, &val)) { dev_err(channel->priv->dev, "Unable to read ESPI098\n"); break; } if (HOST_READ_SCI_STATUS_BIT(val) == 0) break; retry--; dev_dbg(channel->priv->dev, "Host SCI handler not invoked(ESPI098: 0x%0x), so retry(%d) after 1us...\n", val, retry); udelay(1); } regmap_write_bits(lpc_regmap, AST_LPC_SWCR0300, LPC_BMC_TRIG_WAKEUP_EVT_STS, LPC_BMC_TRIG_WAKEUP_EVT_STS); regmap_write_bits(lpc_regmap, AST_LPC_ACPIB3B0, LPC_BMC_TRIG_SCI_EVT_STS, LPC_BMC_TRIG_SCI_EVT_STS); } static int read_host_rwp_val(struct aspeed_mmbi_channel *channel, u32 reg, u32 *val) { int rc; rc = regmap_read(channel->priv->map, reg, val); if (rc) { dev_err(channel->priv->dev, "Unable to read Host RWP pointer\n"); return rc; } return 0; } static int get_b2h_avail_buf_len(struct aspeed_mmbi_channel *channel, ssize_t *avail_buf_len) { struct host_rop hrop; u32 b2h_rp, h_rwp1; if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) { dev_err(channel->priv->dev, "Failed to read Host RWP\n"); return -EAGAIN; } b2h_rp = GET_B2H_READ_POINTER(h_rwp1); dev_dbg(channel->priv->dev, "MMBI HRWP - b2h_rp: 0x%0x\n", b2h_rp); memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop)); dev_dbg(channel->priv->dev, "HROP - b2h_wp: 0x%0x, h2b_rp: 0x%0x", hrop.b2h_wp, hrop.h2b_rp); if (hrop.b2h_wp >= b2h_rp) *avail_buf_len = channel->b2h_cb_size - hrop.b2h_wp + b2h_rp; else *avail_buf_len = b2h_rp - hrop.b2h_wp; return 0; } static int get_mmbi_header(struct aspeed_mmbi_channel *channel, u32 *data_length, u8 *type, u32 *unread_data_len) { u32 h2b_wp, b2h_rp, h_rwp0, h_rwp1; struct mmbi_header header; struct host_rop hrop; memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop)); dev_dbg(channel->priv->dev, "MMBI HROP - b2h_wp: 0x%0x, h2b_rp: 0x%0x\n", hrop.b2h_wp, hrop.h2b_rp); if 
(read_host_rwp_val(channel, ASPEED_MMBI_HRWP0_INSTANCE0, &h_rwp0)) { dev_err(channel->priv->dev, "Failed to read Host RWP\n"); return -EAGAIN; } if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) { dev_err(channel->priv->dev, "Failed to read Host RWP\n"); return -EAGAIN; } h2b_wp = GET_H2B_WRITE_POINTER(h_rwp0); b2h_rp = GET_B2H_READ_POINTER(h_rwp1); dev_dbg(channel->priv->dev, "MMBI HRWP - h2b_wp: 0x%0x, b2h_rp: 0x%0x\n", h2b_wp, b2h_rp); if (h2b_wp >= hrop.h2b_rp) *unread_data_len = h2b_wp - hrop.h2b_rp; else *unread_data_len = channel->h2b_cb_size - hrop.h2b_rp + h2b_wp; if (*unread_data_len < sizeof(struct mmbi_header)) { dev_dbg(channel->priv->dev, "No data to read(%d -%d)\n", h2b_wp, hrop.h2b_rp); return -EAGAIN; } dev_dbg(channel->priv->dev, "READ MMBI header from: 0x%0x\n", (u32)(channel->h2b_cb_vmem + hrop.h2b_rp)); /* Extract MMBI protocol - protocol type and length */ if ((hrop.h2b_rp + sizeof(header)) <= channel->h2b_cb_size) { memcpy(&header, channel->h2b_cb_vmem + hrop.h2b_rp, sizeof(header)); } else { ssize_t chunk_len = channel->h2b_cb_size - hrop.h2b_rp; memcpy(&header, channel->h2b_cb_vmem + hrop.h2b_rp, chunk_len); memcpy(((u8 *)&header) + chunk_len, channel->h2b_cb_vmem, sizeof(header) - chunk_len); } *data_length = FIELD_GET(MMBI_HDR_LENGTH_MASK, header.data); *type = FIELD_GET(MMBI_HDR_TYPE_MASK, header.data); return 0; } static void raise_missing_sci(struct aspeed_mmbi_channel *channel) { struct host_rop hrop; u32 h_rwp0, h_rwp1, b2h_rptr; /* Rise SCI only if Host is READY (h_rdy is 1). */ if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP0_INSTANCE0, &h_rwp0)) { dev_err(channel->priv->dev, "Failed to read Host RWP\n"); return; } if (!GET_HOST_READY_BIT(h_rwp0)) { // Host is not ready, no point in raising the SCI return; } memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop)); if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) { dev_err(channel->priv->dev, "Failed to read Host RWP\n"); return; } b2h_rptr = GET_B2H_READ_POINTER(h_rwp1); if (hrop.b2h_wp == b2h_rptr) { // Host has read all outstanding SCI data, // Do not raise another SCI. 
return; } dev_dbg(channel->priv->dev, "Host not read the data yet, so rising SCI interrupt again...\n"); raise_sci_interrupt(channel); } static void update_host_rop(struct aspeed_mmbi_channel *channel, unsigned int w_len, unsigned int r_len) { struct host_rop hrop; memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop)); dev_dbg(channel->priv->dev, "MMBI HROP - b2h_wp: 0x%0x, h2b_rp: 0x%0x\n", hrop.b2h_wp, hrop.h2b_rp); /* Advance the B2H CB offset for next write */ if ((hrop.b2h_wp + w_len) <= channel->b2h_cb_size) hrop.b2h_wp += w_len; else hrop.b2h_wp = hrop.b2h_wp + w_len - channel->b2h_cb_size; /* Advance the H2B CB offset till where BMC read data */ if ((hrop.h2b_rp + r_len) <= channel->h2b_cb_size) hrop.h2b_rp += r_len; else hrop.h2b_rp = hrop.h2b_rp + r_len - channel->h2b_cb_size; /* * Clear BMC reset request state its set: * Set BMC reset request bit to 0 * Set BMC ready bit to 1 */ if (hrop.b_rst) { dev_dbg(channel->priv->dev, "Clearing BMC reset request state\n"); hrop.b_rst = 0; hrop.b_rdy = 1; } dev_dbg(channel->priv->dev, "Updating HROP - h2b_rp: 0x%0x, b2h_wp: 0x%0x\n", hrop.h2b_rp, hrop.b2h_wp); memcpy(channel->hrop_vmem, &hrop, sizeof(hrop)); /* * Raise SCI interrupt only if B2H buffer is updated * Don't raise SCI, after BMC read the H2B buffer */ if (w_len != 0) raise_sci_interrupt(channel); } static int send_bmc_reset_request(struct aspeed_mmbi_channel *channel) { struct host_rop hrop; memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop)); /* * Send MMBI buffer reset request: First BMC should clear its own * pointers, set Reset bit and reset BMC ready bit. * B2H Write pointer - must be set to zero * H2B read pointer - must be set to zero * BMC ready bit - Set to 0 * BMC reset bit - Set to 1 */ hrop.b2h_wp = 0; hrop.h2b_rp = 0; hrop.b_rdy = 0; hrop.b_rst = 1; dev_info(channel->priv->dev, "Send BMC reset request on MMBI channel(%d)\n", channel->chan_num); memcpy(channel->hrop_vmem, &hrop, sizeof(hrop)); /* Raise SCI interrupt */ raise_sci_interrupt(channel); return 0; } void check_host_reset_request(struct aspeed_mmbi_channel *channel) { struct host_rop hrop; u32 h_rwp1; if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) { dev_err(channel->priv->dev, "Failed to read Host RWP\n"); return; } /* If its not host reset request, just discard */ if (!GET_HOST_RESET_REQ_BIT(h_rwp1)) return; /* Host requested for MMBI buffer reset */ memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop)); /* * When host request for reset MMBI buffer: * B2H Write pointer - must be set to zero * H2B read pointer - must be set to zero * BMC ready bit - No change (Set to 1) * BMC reset bit - No change (Set to 0) */ hrop.b2h_wp = 0; hrop.h2b_rp = 0; dev_info(channel->priv->dev, "Handle Host reset request on MMBI channel(%d)\n", channel->chan_num); memcpy(channel->hrop_vmem, &hrop, sizeof(hrop)); } void wake_up_device(struct aspeed_mmbi_channel *channel) { u32 req_data_len, unread_data_len; u8 type; int i; if (0 != get_mmbi_header(channel, &req_data_len, &type, &unread_data_len)) { /* Bail out as we can't read header */ return; } dev_dbg(channel->priv->dev, "%s: Length: 0x%0x, Protocol Type: %d\n", __func__, req_data_len, type); for (i = 0; channel->supported_protocols[i] != 0; i++) { if (type == channel->supported_protocols[i]) { /* * MMBI supports multiple protocols on each channel * If userspace application is not opened the device * for read /write the data, discard the data and * advance the HROP for processing next command. 
*/ if (channel->protocol[i].process_data) { channel->protocol[i].data_available = true; wake_up(&channel->protocol[i].queue); } else { /* Discard data and advance the hrop */ update_host_rop(channel, 0, req_data_len + sizeof(struct mmbi_header)); } /* * Raise the missing SCI's by checking pointer for host * read acknowledgment. This will work around the Missing * SCI bug on host side. */ dev_warn(channel->priv->dev, "%s: Check and raise missing SCI\n", __func__); raise_missing_sci(channel); } } } static struct aspeed_mmbi_protocol *file_aspeed_espi_mmbi(struct file *file) { return container_of(file->private_data, struct aspeed_mmbi_protocol, miscdev); } static int mmbi_open(struct inode *inode, struct file *filp) { return 0; } static int mmbi_release(struct inode *inode, struct file *filp) { return 0; } static unsigned int mmbi_poll(struct file *filp, poll_table *wait) { struct aspeed_mmbi_protocol *protocol = file_aspeed_espi_mmbi(filp); poll_wait(filp, &protocol->queue, wait); return protocol->data_available ? POLLIN : 0; } static ssize_t mmbi_read(struct file *filp, char *buff, size_t count, loff_t *offp) { struct aspeed_mmbi_protocol *protocol = file_aspeed_espi_mmbi(filp); struct aspeed_mmbi_channel *channel = protocol->chan_ref; struct aspeed_espi_mmbi *priv = channel->priv; struct host_rop hrop; ssize_t rd_offset, rd_len; ssize_t ret; u32 unread_data_len, req_data_len; u8 type; protocol->process_data = true; if (!protocol->data_available && (filp->f_flags & O_NONBLOCK)) { // Work around: The lack of response might be cause by missing SCI // (host didn't consume the last message), check the buffer state // and retry if it's needed raise_missing_sci(channel); return -EAGAIN; } dev_dbg(priv->dev, "%s: count:%d, Type: %d\n", __func__, count, protocol->type); ret = wait_event_interruptible(protocol->queue, protocol->data_available); if (ret == -ERESTARTSYS) { ret = -EINTR; goto err_out; } ret = get_mmbi_header(channel, &req_data_len, &type, &unread_data_len); if (ret != 0) { /* Bail out as we can't read header. */ goto err_out; } dev_dbg(priv->dev, "%s: Length: 0x%0x, Protocol Type: %d, Unread data: %d\n", __func__, req_data_len, type, unread_data_len); if (req_data_len > count) { dev_err(priv->dev, "Data exceeding user space limit: %d\n", count); ret = -EFAULT; /* Discard data and advance the hrop */ update_host_rop(channel, 0, req_data_len + sizeof(struct mmbi_header)); goto err_out; } /* Check is data belongs to this device, if not wake_up corresponding device. 
*/ if (type != protocol->type) { ret = -EFAULT; goto err_out; } memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop)); if ((hrop.h2b_rp + sizeof(struct mmbi_header)) <= channel->h2b_cb_size) { rd_offset = hrop.h2b_rp + sizeof(struct mmbi_header); } else { rd_offset = hrop.h2b_rp + sizeof(struct mmbi_header) - channel->h2b_cb_size; } rd_len = req_data_len; /* Extract data and copy to user space application */ dev_dbg(priv->dev, "READ MMBI Data from: 0x%0x and length: %d\n", (u32)(channel->h2b_cb_vmem + rd_offset), rd_len); if (unread_data_len < sizeof(struct mmbi_header) + rd_len) { dev_err(priv->dev, "Invalid H2B buffer (Read msg length: %d)\n", rd_len); ret = -EFAULT; goto err_out; } if ((channel->h2b_cb_size - rd_offset) >= rd_len) { if (copy_to_user(buff, channel->h2b_cb_vmem + rd_offset, rd_len)) { dev_err(priv->dev, "Failed to copy data to user space\n"); ret = -EFAULT; goto err_out; } rd_offset += rd_len; } else { ssize_t chunk_len; chunk_len = channel->h2b_cb_size - rd_offset; if (copy_to_user(buff, channel->h2b_cb_vmem + rd_offset, chunk_len)) { dev_err(priv->dev, "Failed to copy data to user space\n"); ret = -EFAULT; goto err_out; } rd_offset = 0; if (copy_to_user(buff + chunk_len, channel->h2b_cb_vmem + rd_offset, rd_len - chunk_len)) { dev_err(priv->dev, "Failed to copy data to user space\n"); ret = -EFAULT; goto err_out; } rd_offset += (rd_len - chunk_len); } *offp += rd_len; ret = rd_len; update_host_rop(channel, 0, rd_len + sizeof(struct mmbi_header)); dev_dbg(priv->dev, "%s: Return length: %d\n", __func__, ret); err_out: /* * Raise the missing SCI's by checking pointer for host * read acknowledgment. This will work around the Missing * SCI bug on host side. * */ dev_warn(priv->dev, "%s: Check and raise missing SCI\n", __func__); raise_missing_sci(channel); protocol->data_available = false; wake_up_device(channel); return ret; } static ssize_t mmbi_write(struct file *filp, const char *buffer, size_t len, loff_t *offp) { struct aspeed_mmbi_protocol *protocol = file_aspeed_espi_mmbi(filp); struct aspeed_mmbi_channel *channel = protocol->chan_ref; struct aspeed_espi_mmbi *priv = channel->priv; struct mmbi_header header; struct host_rop hrop; ssize_t wt_offset; ssize_t avail_buf_len; ssize_t chunk_len; ssize_t end_offset; u32 h_rwp0; dev_dbg(priv->dev, "%s: length:%d , type: %d\n", __func__, len, protocol->type); if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP0_INSTANCE0, &h_rwp0)) { dev_err(priv->dev, "Failed to read Host RWP\n"); return -EAGAIN; } /* If Host READY bit is not set, Just discard the write. 
*/ if (!GET_HOST_READY_BIT(h_rwp0)) { dev_dbg(channel->priv->dev, "Host not ready, discarding request...\n"); return -EAGAIN; } if (get_b2h_avail_buf_len(channel, &avail_buf_len)) { dev_dbg(priv->dev, "Failed to B2H empty buffer len\n"); return -EAGAIN; } dev_dbg(priv->dev, "B2H buffer empty space: %d\n", avail_buf_len); /* Empty space should be more than write request data size */ if (avail_buf_len <= sizeof(header) || (len > (avail_buf_len - sizeof(header)))) { dev_err(priv->dev, "Not enough space(%d) in B2H buffer\n", avail_buf_len); return -ENOSPC; } /* Fill multi-protocol header */ header.data = ((protocol->type << 24) + len); memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop)); wt_offset = hrop.b2h_wp; end_offset = channel->b2h_cb_size; if ((end_offset - wt_offset) >= sizeof(header)) { memcpy(channel->b2h_cb_vmem + wt_offset, &header, sizeof(header)); wt_offset += sizeof(header); } else { chunk_len = end_offset - wt_offset; memcpy(channel->b2h_cb_vmem + wt_offset, &header, chunk_len); memcpy(channel->b2h_cb_vmem, &header + chunk_len, (sizeof(header) - chunk_len)); wt_offset = (sizeof(header) - chunk_len); } /* Write the data */ if ((end_offset - wt_offset) >= len) { if (copy_from_user(&channel->b2h_cb_vmem[wt_offset], buffer, len)) { return -EFAULT; } wt_offset += len; } else { chunk_len = end_offset - wt_offset; if (copy_from_user(&channel->b2h_cb_vmem[wt_offset], buffer, chunk_len)) { return -EFAULT; } wt_offset = 0; if (copy_from_user(&channel->b2h_cb_vmem[wt_offset], buffer + chunk_len, len - chunk_len)) { return -EFAULT; } wt_offset += len - chunk_len; } *offp += len; update_host_rop(channel, len + sizeof(struct mmbi_header), 0); return len; } static int get_mmbi_config(struct aspeed_mmbi_channel *channel, void __user *userbuf) { bool h_ready; struct host_rop hrop; struct aspeed_mmbi_get_config get_conf; u32 h2b_wptr, b2h_rptr, h_rwp0, h_rwp1; if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP0_INSTANCE0, &h_rwp0)) { dev_err(channel->priv->dev, "Failed to read Host RWP\n"); return -EAGAIN; } if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) { dev_err(channel->priv->dev, "Failed to read Host RWP\n"); return -EAGAIN; } h2b_wptr = GET_H2B_WRITE_POINTER(h_rwp0); b2h_rptr = GET_B2H_READ_POINTER(h_rwp1); h_ready = GET_HOST_READY_BIT(h_rwp0) ? 
true : false; memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop)); get_conf.h_rdy = h_ready; get_conf.h2b_wp = h2b_wptr; get_conf.b2h_rp = b2h_rptr; get_conf.h2b_rp = hrop.h2b_rp; get_conf.b2h_wp = hrop.b2h_wp; if (copy_to_user(userbuf, &get_conf, sizeof(get_conf))) { dev_err(channel->priv->dev, "copy to user failed\n"); return -EFAULT; } return 0; } static int get_b2h_empty_space(struct aspeed_mmbi_channel *channel, void __user *userbuf) { struct aspeed_mmbi_get_empty_space empty_space; ssize_t avail_buf_len; if (get_b2h_avail_buf_len(channel, &avail_buf_len)) { dev_dbg(channel->priv->dev, "Failed to B2H empty buffer len\n"); return -EAGAIN; } dev_dbg(channel->priv->dev, "B2H buffer empty space: %d\n", avail_buf_len); empty_space.length = avail_buf_len; if (copy_to_user(userbuf, &empty_space, sizeof(empty_space))) { dev_err(channel->priv->dev, "copy to user failed\n"); return -EFAULT; } return 0; } static long mmbi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct aspeed_mmbi_protocol *protocol = file_aspeed_espi_mmbi(filp); struct aspeed_mmbi_channel *channel = protocol->chan_ref; void __user *userbuf = (void __user *)arg; int ret; switch (cmd) { case ASPEED_MMBI_CTRL_IOCTL_GET_B2H_EMPTY_SPACE: ret = get_b2h_empty_space(channel, userbuf); break; case ASPEED_MMBI_CTRL_IOCTL_SEND_RESET_REQUEST: ret = send_bmc_reset_request(channel); break; case ASPEED_MMBI_CTRL_IOCTL_GET_CONFIG: ret = get_mmbi_config(channel, userbuf); break; default: dev_err(channel->priv->dev, "Command not found\n"); ret = -ENOTTY; } return ret; } static const struct file_operations aspeed_espi_mmbi_fops = { .owner = THIS_MODULE, .open = mmbi_open, .release = mmbi_release, .read = mmbi_read, .write = mmbi_write, .unlocked_ioctl = mmbi_ioctl, .poll = mmbi_poll }; static char *get_protocol_suffix(protocol_type type) { switch (type) { case MMBI_PROTOCOL_IPMI: return "ipmi"; case MMBI_PROTOCOL_SEAMLESS: return "seamless"; case MMBI_PROTOCOL_RAS_OFFLOAD: return "ras_offload"; case MMBI_PROTOCOL_MCTP: return "mctp"; case MMBI_PROTOCOL_NODE_MANAGER: return "nm"; } return NULL; } static struct mmbi_cap_desc mmbi_desc_init(struct aspeed_mmbi_channel channel) { struct mmbi_cap_desc ch_desc; memset(&ch_desc, 0, sizeof(ch_desc)); /* Per MMBI protoco spec, Set it to "$MMBI$" */ strncpy(ch_desc.signature, "$MMBI$", sizeof(ch_desc.signature)); ch_desc.version = 1; ch_desc.instance_num = channel.chan_num; /* * TODO: Add multi-channel support. Handcoded H2B start offset * to 0x8000 as we support single channel today. 
*/ ch_desc.nex_inst_base_addr = 0; ch_desc.b2h_ba = sizeof(struct mmbi_cap_desc) + sizeof(struct host_rop); ch_desc.h2b_ba = 0x8000 + sizeof(struct host_rwp); ch_desc.b2h_d = 0x800; /* 32KB = 0x800 * 16 */ ch_desc.h2b_d = 0x800; /* 32KB = 0x800 * 16 */ ch_desc.buffer_type = 0x01; /* VMSCB */ ch_desc.bt_desc.host_rop_p = sizeof(struct mmbi_cap_desc); ch_desc.bt_desc.host_rwp_p = 0x8000; ch_desc.bt_desc.msg_protocol_type = 0x01; /* Multiple protocol type */ ch_desc.bt_desc.host_int_type = 0x01; /* SCI Triggered through eSPI VW */ ch_desc.bt_desc.global_sys_interrupt = 0x00; /* Not used */ ch_desc.bt_desc.bmc_int_type = 0x00; /* Auto - AST HW Interrupt */ ch_desc.bt_desc.bmc_int_a = 0x00; /* Not used, set to zero */ ch_desc.bt_desc.bmc_int_v = 0x00; /* Not used, set to zero */ ch_desc.crc8 = crc8(mmbi_crc8_table, (u8 *)&ch_desc, (size_t)(sizeof(ch_desc) - 1), 0); return ch_desc; } static int mmbi_channel_init(struct aspeed_espi_mmbi *priv, struct device_node *node, u8 idx) { struct device *dev = priv->dev; int rc; u8 i; u8 *h2b_vaddr, *b2h_vaddr; struct mmbi_cap_desc ch_desc; struct host_rop hrop; int no_of_protocols_enabled; u8 mmbi_supported_protocols[MAX_NO_OF_SUPPORTED_PROTOCOLS]; u32 b2h_size = (priv->mmbi_size / 2); u32 h2b_size = (priv->mmbi_size / 2); b2h_vaddr = priv->dma_vaddr; h2b_vaddr = priv->dma_vaddr + (priv->mmbi_size / 2); memset(&priv->chan[idx], 0, sizeof(struct aspeed_mmbi_channel)); priv->chan[idx].chan_num = idx; priv->chan[idx].desc_vmem = b2h_vaddr; priv->chan[idx].hrop_vmem = b2h_vaddr + sizeof(struct mmbi_cap_desc); priv->chan[idx].b2h_cb_vmem = b2h_vaddr + sizeof(struct mmbi_cap_desc) + sizeof(struct host_rop); priv->chan[idx].b2h_cb_size = b2h_size - sizeof(struct mmbi_cap_desc) - sizeof(struct host_rop); /* Set BMC ready bit */ memcpy(&hrop, priv->chan[idx].hrop_vmem, sizeof(hrop)); hrop.b_rdy = 1; memcpy(priv->chan[idx].hrop_vmem, &hrop, sizeof(hrop)); priv->chan[idx].hrwp_vmem = h2b_vaddr; priv->chan[idx].h2b_cb_vmem = h2b_vaddr + sizeof(struct host_rwp); priv->chan[idx].h2b_cb_size = h2b_size - sizeof(struct host_rwp); dev_dbg(priv->dev, "B2H mapped addr - desc: 0x%0x, hrop: 0x%0x, b2h_cb: 0x%0x\n", (size_t)priv->chan[idx].desc_vmem, (size_t)priv->chan[idx].hrop_vmem, (size_t)priv->chan[idx].b2h_cb_vmem); dev_dbg(priv->dev, "H2B mapped addr - hrwp: 0x%0x, h2b_cb: 0x%0x\n", (size_t)priv->chan[idx].hrwp_vmem, (size_t)priv->chan[idx].h2b_cb_vmem); dev_dbg(priv->dev, "B2H buffer size: 0x%0x\n", (size_t)priv->chan[idx].b2h_cb_size); dev_dbg(priv->dev, "H2B buffer size: 0x%0x\n", (size_t)priv->chan[idx].h2b_cb_size); /* Initialize the MMBI channel descriptor */ ch_desc = mmbi_desc_init(priv->chan[idx]); memcpy(priv->chan[idx].desc_vmem, &ch_desc, sizeof(ch_desc)); priv->chan[idx].enabled = true; if (!node) { dev_err(priv->dev, "mmbi protocol : no instance found\n"); goto err_destroy_channel; } no_of_protocols_enabled = of_property_count_u8_elems(node, "protocols"); if (no_of_protocols_enabled <= 0 || no_of_protocols_enabled > MAX_NO_OF_SUPPORTED_PROTOCOLS){ dev_err(dev, "No supported mmbi protocol\n"); goto err_destroy_channel; } rc = of_property_read_u8_array(node, "protocols", mmbi_supported_protocols, no_of_protocols_enabled); if (!rc) { memset(&priv->chan[idx].supported_protocols, 0, sizeof(priv->chan[idx].supported_protocols)); memcpy(&priv->chan[idx].supported_protocols, mmbi_supported_protocols, sizeof(mmbi_supported_protocols)); } for (i = 0; i < no_of_protocols_enabled; i++) { char *dev_name; u8 proto_type; proto_type = 
            priv->chan[idx].supported_protocols[i];
        dev_name = get_protocol_suffix(proto_type);
        if (!dev_name) {
            dev_err(dev, "Unable to get MMBI protocol suffix name\n");
            goto err_destroy_channel;
        }

        priv->chan[idx].protocol[i].type = proto_type;
        priv->chan[idx].protocol[i].miscdev.name =
            devm_kasprintf(dev, GFP_KERNEL, "%s_%d_%s",
                           DEVICE_NAME, idx, dev_name);
        priv->chan[idx].protocol[i].miscdev.minor = MISC_DYNAMIC_MINOR;
        priv->chan[idx].protocol[i].miscdev.fops = &aspeed_espi_mmbi_fops;
        priv->chan[idx].protocol[i].miscdev.parent = dev;
        rc = misc_register(&priv->chan[idx].protocol[i].miscdev);
        if (rc) {
            dev_err(dev, "Unable to register device\n");
            goto err_destroy_channel;
        }

        /* Hold the back reference of the channel */
        priv->chan[idx].protocol[i].chan_ref = &priv->chan[idx];
        priv->chan[idx].protocol[i].data_available = false;
        priv->chan[idx].protocol[i].process_data = false;
        init_waitqueue_head(&priv->chan[idx].protocol[i].queue);
    }
    priv->chan[idx].priv = priv;

    /*
     * When the BMC resets while the host is in the OS, SRAM memory is
     * remapped and its content is lost. This includes the host ready
     * state, which will block memory write transactions. Ideally this
     * reset should be done while mapping the memory (u-boot). Since
     * channel initialization (including the descriptor) is done in the
     * kernel, a channel reset is also issued during driver load. In the
     * future, when staged command processing (IPMI commands for BIOS-BMC
     * communication) is enabled, this step should be moved to u-boot.
     */
    if (send_bmc_reset_request(&priv->chan[idx]))
        dev_info(dev, "MMBI channel(%d) reset failed\n", idx);

    dev_info(dev, "MMBI Channel(%d) initialized successfully\n", idx);
    return 0;

err_destroy_channel:
    if (b2h_vaddr)
        memunmap(b2h_vaddr);
    if (h2b_vaddr)
        memunmap(h2b_vaddr);
    priv->chan[idx].enabled = false;
    return -ENOMEM;
}

static irqreturn_t aspeed_espi_mmbi_irq(int irq, void *arg)
{
    struct aspeed_espi_mmbi *priv = arg;
    u32 status;
    int idx;

    regmap_read(priv->map, ASPEED_MMBI_IRQ_STATUS, &status);
    /* Clear interrupt */
    regmap_write(priv->map, ASPEED_MMBI_IRQ_STATUS, status);

    for (idx = 0; idx < MAX_NO_OF_SUPPORTED_CHANNELS; idx++) {
        /*
         * Host RWP 1: updated after the host reads data and also when
         * the host wants to send a reset-MMBI-buffer request, so handle
         * the reset request and ignore the read pointer update.
         * Host RWP 0: updated when the host writes data on H2B, so
         * process the request by waking the corresponding device.
         */
        if (!priv->chan[idx].enabled)
            continue;

        if ((status >> (idx * 2)) & HRWP1_READ_MASK)
            check_host_reset_request(&priv->chan[idx]);
        else
            wake_up_device(&priv->chan[idx]);
    }
    dev_dbg(priv->dev, "MMBI IRQ Status: %d\n", status);

    return IRQ_HANDLED;
}

static const struct of_device_id aspeed_espi_mmbi_match[] = {
    { .compatible = "aspeed,ast2600-espi-mmbi" },
    {}
};
MODULE_DEVICE_TABLE(of, aspeed_espi_mmbi_match);

static int aspeed_espi_mmbi_probe(struct platform_device *pdev)
{
    const struct of_device_id *dev_id;
    struct aspeed_espi_mmbi *priv;
    struct device_node *node;
    struct resource resm;
    void __iomem *regs;
    u32 reg_val, enable_irqs = 0;
    int rc, i;

    dev_dbg(&pdev->dev, "MMBI: Probing MMBI devices...\n");

    priv = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_espi_mmbi),
                        GFP_KERNEL);
    if (!priv)
        return -ENOMEM;
    priv->dev = &pdev->dev;

    dev_id = of_match_device(aspeed_espi_mmbi_match, priv->dev);
    if (!dev_id) {
        dev_err(priv->dev, "MMBI: Failed to match mmbi device\n");
        return -EINVAL;
    }

    regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(regs)) {
        dev_err(priv->dev, "MMBI: Failed to get regmap!\n");
        return PTR_ERR(regs);
    }

    /* MMBI register map */
    priv->map = devm_regmap_init_mmio(priv->dev, regs,
                                      &aspeed_espi_mmbi_regmap_cfg);
    if (IS_ERR(priv->map)) {
        dev_err(priv->dev, "MMBI: Couldn't get regmap\n");
        return -ENODEV;
    }

    /* eSPI register map */
    priv->pmap = syscon_regmap_lookup_by_phandle(priv->dev->of_node,
                                                 "aspeed,espi");
    if (IS_ERR(priv->pmap)) {
        dev_err(priv->dev, "MMBI: Failed to find ESPI regmap\n");
        return PTR_ERR(priv->pmap);
    }

    /* LPC register map */
    priv->lpc_map = syscon_regmap_lookup_by_phandle(priv->dev->of_node,
                                                    "aspeed,lpc");
    if (IS_ERR(priv->lpc_map)) {
        dev_err(priv->dev, "MMBI: Failed to find LPC regmap\n");
        return PTR_ERR(priv->lpc_map);
    }

    /* If memory-region is described in the device tree then store it */
    node = of_parse_phandle(priv->dev->of_node, "memory-region", 0);
    if (node) {
        rc = of_property_read_u32(priv->dev->of_node, "host-map-addr",
                                  &priv->host_map_addr);
        if (rc) {
            dev_info(priv->dev, "No host mapping address\n");
            priv->host_map_addr = PCH_ESPI_LGMR_BASE_ADDRESS;
        }

        rc = of_address_to_resource(node, 0, &resm);
        of_node_put(node);
        if (!rc) {
            priv->mmbi_size = resource_size(&resm);
            priv->mmbi_phys_addr = resm.start;
        } else {
            priv->mmbi_size = ESPI_MMBI_TOTAL_SIZE;
            priv->mmbi_phys_addr = BMC_SRAM_BASE_ADDRESS;
        }
    } else {
        dev_dbg(priv->dev, "No DTS config, assign default MMBI Address\n");
        priv->host_map_addr = PCH_ESPI_LGMR_BASE_ADDRESS;
        priv->mmbi_size = ESPI_MMBI_TOTAL_SIZE;
        priv->mmbi_phys_addr = BMC_SRAM_BASE_ADDRESS;
    }
    dev_dbg(priv->dev, "MMBI: HostAddr:0x%x, SramAddr:0x%x, Size: 0x%0x\n",
            priv->host_map_addr, priv->mmbi_phys_addr, priv->mmbi_size);

    priv->dma_vaddr = dma_alloc_coherent(priv->dev, priv->mmbi_size,
                                         &priv->mmbi_phys_addr, GFP_KERNEL);
    if (!priv->dma_vaddr) {
        dev_err(priv->dev, "MMBI: DMA memory allocation failed\n");
        return -ENOMEM;
    }
    dev_dbg(priv->dev, "MMBI: DMA Addr: 0x%x\n", (u32)priv->dma_vaddr);
    memset(priv->dma_vaddr, 0, priv->mmbi_size);

    crc8_populate_msb(mmbi_crc8_table, MMBI_CRC8_POLYNOMIAL);

    /* eSPI controller settings */
    regmap_write(priv->pmap, ASPEED_ESPI_PC_RX_SADDR, priv->host_map_addr);
    regmap_write(priv->pmap, ASPEED_ESPI_PC_RX_TADDR, priv->mmbi_phys_addr);
    regmap_write(priv->pmap, ASPEED_ESPI_PC_RX_TADDRM,
                 ASPEED_ESPI_PC_RX_TADDR_MASK);
    regmap_update_bits(priv->pmap, ASPEED_ESPI_CTRL2,
                       ESPI_DISABLE_PERP_MEM_READ |
                       ESPI_DISABLE_PERP_MEM_WRITE, 0);

    /* MMBI controller settings */
    regmap_read(priv->map, ASPEED_MMBI_CTRL, &reg_val);
    regmap_read(priv->map, ASPEED_MMBI_IRQ_ENABLE, &reg_val);
    regmap_write(priv->map, ASPEED_MMBI_CTRL,
                 MMBI_ENABLE_FUNCTION | MMBI_TOTAL_SIZE_64K |
                 MMBI_INSTANCE_SIZE_64K);
    regmap_write(priv->map, ASPEED_MMBI_IRQ_ENABLE, 0x03);

    dev_set_drvdata(priv->dev, priv);

    for_each_child_of_node(priv->dev->of_node, node) {
        rc = of_property_read_u32(node, "channel", &i);
        if (rc || i >= MAX_NO_OF_SUPPORTED_CHANNELS ||
            priv->chan[i].enabled)
            continue;

        rc = mmbi_channel_init(priv, node, i);
        if (rc) {
            dev_err(priv->dev, "MMBI: Channel(%d) init failed\n", i);
        } else {
            enable_irqs += (0x03 << i);
        }
    }
    regmap_write(priv->map, ASPEED_MMBI_IRQ_ENABLE, enable_irqs);

    /* Enable IRQ */
    priv->irq = platform_get_irq(pdev, 0);
    if (priv->irq < 0) {
        dev_err(priv->dev, "MMBI: No irq specified\n");
        return priv->irq;
    }

    rc = devm_request_irq(priv->dev, priv->irq, aspeed_espi_mmbi_irq,
                          IRQF_SHARED, dev_name(priv->dev), priv);
    if (rc) {
        dev_err(priv->dev, "MMBI: Unable to get IRQ\n");
        return rc;
    }

    dev_dbg(priv->dev, "MMBI: aspeed MMBI driver loaded successfully\n");

    return 0;
}

static int aspeed_espi_mmbi_remove(struct platform_device *pdev)
{
    struct aspeed_espi_mmbi *priv = dev_get_drvdata(&pdev->dev);
    int i, j;

    dev_dbg(priv->dev, "MMBI: Removing MMBI device\n");

    for (i = 0; i < MAX_NO_OF_SUPPORTED_CHANNELS; i++) {
        if (!priv->chan[i].enabled)
            continue;
        for (j = 0; priv->chan[i].supported_protocols[j] != 0; j++)
            misc_deregister(&priv->chan[i].protocol[j].miscdev);
    }

    if (priv->dma_vaddr)
        dma_free_coherent(priv->dev, priv->mmbi_size, priv->dma_vaddr,
                          priv->mmbi_phys_addr);

    return 0;
}

static struct platform_driver aspeed_espi_mmbi_driver = {
    .driver = {
        .name = DEVICE_NAME,
        .of_match_table = aspeed_espi_mmbi_match,
    },
    .probe = aspeed_espi_mmbi_probe,
    .remove = aspeed_espi_mmbi_remove,
};
module_platform_driver(aspeed_espi_mmbi_driver);

MODULE_AUTHOR("AppaRao Puli <apparao.puli@intel.com>");
MODULE_DESCRIPTION("MMBI Driver");
MODULE_LICENSE("GPL v2");

(This is my code. Its main problem is that it overwrites three registers: ASPEED_ESPI_PC_RX_SADDR, ASPEED_ESPI_PC_RX_TADDR, and ASPEED_ESPI_PC_RX_TADDRM.)
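Regarding the three registers the probe overwrites in its "eSPI controller settings" block: one possible mitigation is to read the current values back before rewriting them, so the previous mapping (for example, one programmed by u-boot or by another eSPI driver) can be restored later. The sketch below is a minimal, hypothetical illustration of that idea; the struct espi_pc_rx_regs and the helpers espi_pc_rx_save()/espi_pc_rx_restore() are invented names and are not part of the posted driver, while regmap_read()/regmap_write() are the standard kernel regmap calls already used above.

/*
 * Hypothetical helpers (not part of the posted driver): snapshot and
 * restore the three eSPI peripheral-channel RX registers that
 * aspeed_espi_mmbi_probe() overwrites.
 */
struct espi_pc_rx_regs {
    unsigned int saddr;  /* ASPEED_ESPI_PC_RX_SADDR */
    unsigned int taddr;  /* ASPEED_ESPI_PC_RX_TADDR */
    unsigned int taddrm; /* ASPEED_ESPI_PC_RX_TADDRM */
};

static int espi_pc_rx_save(struct regmap *pmap, struct espi_pc_rx_regs *s)
{
    int rc;

    /* Read back whatever was programmed before the driver touches it */
    rc = regmap_read(pmap, ASPEED_ESPI_PC_RX_SADDR, &s->saddr);
    if (rc)
        return rc;
    rc = regmap_read(pmap, ASPEED_ESPI_PC_RX_TADDR, &s->taddr);
    if (rc)
        return rc;
    return regmap_read(pmap, ASPEED_ESPI_PC_RX_TADDRM, &s->taddrm);
}

static void espi_pc_rx_restore(struct regmap *pmap,
                               const struct espi_pc_rx_regs *s)
{
    /* Put the saved values back, e.g. on remove or on probe failure */
    regmap_write(pmap, ASPEED_ESPI_PC_RX_SADDR, s->saddr);
    regmap_write(pmap, ASPEED_ESPI_PC_RX_TADDR, s->taddr);
    regmap_write(pmap, ASPEED_ESPI_PC_RX_TADDRM, s->taddrm);
}

Calling espi_pc_rx_save(priv->pmap, &saved) just before the three regmap_write() calls in probe, and espi_pc_rx_restore() in aspeed_espi_mmbi_remove(), would keep the previous register contents recoverable. Whether restoring them is actually correct, or whether the MMBI window should simply own those registers, depends on what else in the system programs the eSPI peripheral-channel mapping.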