spi driver: kthread_worker 和 kthread_work

kthread_worker与kthread_work详解
本文详细介绍了Linux内核中的kthread_worker和kthread_work机制,包括它们的结构体定义、初始化方法、核心函数kthread_worker_fn的工作流程及队列化操作等。通过一张图直观展示了worker和work之间的关系。

kthread_worker 和 kthread_work

转载自:

https://blog.youkuaiyun.com/qqliyunpeng/article/details/53931350

作者: 李云鹏(qqliyunpeng@sina.cn)

1. 简介:

        在spi驱动中用到了内核的线程,用的函数就是跟 kthread_worker 和 kthread_work 相关的函数。对于这两个名词的翻译,在网上暂时没有找到合适的,先直白地翻译成内核线程相关的"工人"和"工作"。这么直白的翻译是与其工作原理对应的:本来想翻译成别的,但一想到它的实现方式,还是直白的翻译更能让人理解。

        此部分介绍的函数主要在 include/linux/kthread.h 文件,这里可以推测,也许是内核为了方便我们使用内核的线程,而设计的kthread_work和kthread_worker。


2. 函数:


2.1 先来看这两个结构体:


kthread_work

kthread_worker

[cpp]  view plain  copy
  1. struct kthread_worker {  
  2.     spinlock_t      lock;  
  3.     struct list_head    work_list;  
  4.     struct task_struct  *task;  
  5.     struct kthread_work *current_work;  
  6. };  
  7.   
  8. struct kthread_work {  
  9.     struct list_head    node;  
  10.     kthread_work_func_t func;  
  11.     wait_queue_head_t   done; //  等待队列,内部成员是一个锁和一个链表节点  
  12.     struct kthread_worker   *worker;  
  13. };  

【1】其中的 wait_queue_head_t 结构体需要解析一下:

[cpp]  view plain  copy
  1. struct __wait_queue_head { // 是一个带锁的链表节点  
  2.     spinlock_t lock;  
  3.     struct list_head task_list;  
  4. };  
  5. typedef struct __wait_queue_head wait_queue_head_t;  

2.2 声明:

DEFINE_KTHREAD_WORK宏

DEFINE_KTHREAD_WORKER宏

[cpp]  view plain  copy
  1. #define KTHREAD_WORKER_INIT(worker) {               \  
  2.     .lock = __SPIN_LOCK_UNLOCKED((worker).lock),            \ // 初始化worker中lock  
  3.     .work_list = LIST_HEAD_INIT((worker).work_list),        \ // 初始化worker中的链表节点 work_list  
  4.     }  
  5.   
  6. #define KTHREAD_WORK_INIT(work, fn) {               \  
  7.     .node = LIST_HEAD_INIT((work).node),                \ // 初始化work中的链表节点 node (next和pre指针指向自己的首地址)  
  8.     .func = (fn),                           \ // func成员赋值  
  9.     .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done),     \ // 初始化 done 成员 (初始化等待队列中的锁和链表节点,  
  10.     }                                                                 // 链表节点的初始化就是next和pre指针指向节点的首地址)  
  11.   
  12. #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {               \  
  13.     .lock       = __SPIN_LOCK_UNLOCKED(name.lock),      \  
  14.     .task_list  = { &(name).task_list, &(name).task_list } }  
  15.   
  16. #define DEFINE_KTHREAD_WORKER(worker)                   \  
  17.     struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)  
  18.   
  19. #define DEFINE_KTHREAD_WORK(work, fn)                   \  
  20.     struct kthread_work work = KTHREAD_WORK_INIT(work, fn)  

2.3 初始化


init_kthread_work宏

init_kthread_worker宏

[cpp]  view plain  copy
  1. #define init_kthread_worker(worker)                 \ // 初始化 kthread_worker  
  2.     do {                                \  
  3.         static struct lock_class_key __key;         \  
  4.         __init_kthread_worker((worker), "("#worker")->lock", &__key); \  
  5.     } while (0)  
  6.   
  7. #define init_kthread_work(work, fn)                 \ // 初始化 kthread_work  
  8.     do {                                \  
  9.         memset((work), 0, sizeof(struct kthread_work));     \  
  10.         INIT_LIST_HEAD(&(work)->node);               \ // 初始化成员 node 链表节点  
  11.         (work)->func = (fn);                 \ // 将回调函数的指针指向fn函数,内核线程将会一直执行的函数  
  12.         init_waitqueue_head(&(work)->done);          \ // 初始化成员 done (等待队列)  
  13.     } while (0)  
  14.   
  15. void __init_kthread_worker(struct kthread_worker *worker,  
  16.                 const char *name,  
  17.                 struct lock_class_key *key)  
  18. {  
  19.     spin_lock_init(&worker->lock);  
  20.     lockdep_set_class_and_name(&worker->lock, key, name); // 跟防止死锁有关,此处不深究  
  21.     INIT_LIST_HEAD(&worker->work_list); // 初始化 work_list 链表节点  
  22.     worker->task = NULL;  
  23. }  

2.4 内核线程一直执行的函数


kthread_worker_fn函数

[cpp]  view plain  copy
  1. /** 
  2.  * kthread_worker_fn - kthread 函数目的是执行 kthread_worker中work_list下的work,此函数是作为内核线程中一直执行的函数 
  3.  * @worker_ptr: 指向初始化了的 kthread_worker 
  4.  */  
  5. int kthread_worker_fn(void *worker_ptr)  
  6. {  
  7.     struct kthread_worker *worker = worker_ptr;  
  8.     struct kthread_work *work;  
  9.   
  10.     WARN_ON(worker->task);  
  11.     worker->task = current;  
  12. repeat:  
  13.     set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */  
  14.   
  15.     if (kthread_should_stop()) { // 如果接收到线程停止的信号  
  16.         __set_current_state(TASK_RUNNING);  
  17.         spin_lock_irq(&worker->lock);  
  18.         worker->task = NULL;  
  19.         spin_unlock_irq(&worker->lock);  
  20.         return 0;  
  21.     }  
  22.   
  23.     work = NULL;  
  24.     spin_lock_irq(&worker->lock);  
  25.     if (!list_empty(&worker->work_list)) { // 如果 worker中的work_list链表不是空的  
  26.         work = list_first_entry(&worker->work_list, // 取出头结点后边的第一个结构体kthread_work  
  27.                     struct kthread_work, node);  
  28.         list_del_init(&work->node); // 删除链表中的入口  
  29.     }  
  30.     worker->current_work = work; // 将正在处理的work地址赋给 worker中的current_work成员  
  31.     spin_unlock_irq(&worker->lock);  
  32.   
  33.     if (work) { // 如果有 work  
  34.     __set_current_state(TASK_RUNNING); // 将线程状态设回TASK_RUNNING(注意:只是改状态,并非"启动"内核线程)  
  35.         work->func(work); // 运行work中的func函数  
  36.     } else if (!freezing(current)) // 如果没有work要做,并且没有freeze,则主动请求调度,主动放弃cpu时间片  
  37.         schedule();  
  38.   
  39.     try_to_freeze();  
  40.     goto repeat; // 无限循环  
  41. }  
【1】可以看到,这个函数的关键就是重复地执行kthread_worker结构体中的work_list链表所挂接的kthread_work中的func函数,直到work_list变为空为止。

【2】要知道的是kthread是内核线程,是一直运行在内核态的线程

【3】这个函数一般是作为回调函数使用,比如spi.c中的如下程序

[cpp]  view plain  copy
  1. master->kworker_task = kthread_run(kthread_worker_fn,  
  2.                        &master->kworker,  
  3.                        dev_name(&master->dev));  
  4.   
  5. /** 
  6.  * kthread_run - 创建并唤醒一个内核线程 
  7.  * @threadfn: the function to run until signal_pending(current). 
  8.  * @data: data ptr for @threadfn. 
  9.  * @namefmt: printf-style name for the thread. 
  10.  * 
  11.  * Description: Convenient wrapper for kthread_create() followed by 
  12.  * wake_up_process().  Returns the kthread or ERR_PTR(-ENOMEM). 
  13.  */  
  14. #define kthread_run(threadfn, data, namefmt, ...) ...(此处省略)  


2.5 队列化kthread_work


queue_kthread_work 函数

[cpp]  view plain  copy
  1. /** 
  2.  * queue_kthread_work - 队列化一个 kthread_work,实质是将work中的node节点挂接到worker中的work_list后边,并尝试唤醒worker中的任务 
  3.  * @worker: 目标 kthread_worker 
  4.  * @work: 要队列化的 kthread_work 
  5.  * 
  6.  * 队列化 @work 目的是为了让任务异步执行.  @task 
  7.  * 必须已经被 kthread_worker_create() 创建了.   
  8.  * 队列化成功,返回true,不成功返回false 
  9.  */  
  10. bool queue_kthread_work(struct kthread_worker *worker,  
  11.             struct kthread_work *work)  
  12. {  
  13.     bool ret = false;  
  14.     unsigned long flags;  
  15.   
  16.     spin_lock_irqsave(&worker->lock, flags);  
  17.     if (list_empty(&work->node)) {   // 这里保证要插入到worker中链表节点的work的node节点一定要是一个独立的,不能是一串  
  18.         insert_kthread_work(worker, work, &worker->work_list);  
  19.         ret = true;  
  20.     }  
  21.     spin_unlock_irqrestore(&worker->lock, flags);  
  22.     return ret;  
  23. }  
  24.   
  25. /* 在@worker中的work_list链表中的@pos位置的后边插入@work中的链表节点 */  
  26. static void insert_kthread_work(struct kthread_worker *worker,  
  27.                    struct kthread_work *work,  
  28.                    struct list_head *pos)  
  29. {  
  30.     lockdep_assert_held(&worker->lock);  
  31.   
  32.     list_add_tail(&work->node, pos);  
  33.     work->worker = worker; // work中的worker指针指向worker  
  34.     if (likely(worker->task))  
  35.         wake_up_process(worker->task); // 尝试唤醒一下 worker 中的task指向的线程来处理work  
  36. }  

2.6 执行完worker中的work


flush_kthread_worker 函数

[cpp]  view plain  copy
  1. struct kthread_flush_work {  
  2.     struct kthread_work work;  
  3.     struct completion   done;  
  4. };  
  5.   
  6. static void kthread_flush_work_fn(struct kthread_work *work)  
  7. {  
  8.     struct kthread_flush_work *fwork =  
  9.         container_of(work, struct kthread_flush_work, work);  
  10.     complete(&fwork->done); // 唤醒完成量  
  11. }  
  12.   
  13. /** 
  14.  * flush_kthread_worker - flush all current works on a kthread_worker 
  15.  * @worker: worker to flush 
  16.  * 
  17.  * Wait until all currently executing or pending works on @worker are 
  18.  * finished. 
  19.  */  
  20. void flush_kthread_worker(struct kthread_worker *worker)  
  21. {  
  22.     struct kthread_flush_work fwork = {  
  23.         KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),  
  24.     COMPLETION_INITIALIZER_ONSTACK(fwork.done), // ONSTACK后缀表示该completion分配在栈上(主要是让lockdep正确处理栈上对象),并非相当于加了static  
  25.     };  
  26.   
  27.     queue_kthread_work(worker, &fwork.work); // 将 fwork中的work成员的node节点挂接到worker的work_list下,并尝试唤醒线程执行kthread_flush_work_fn函数  
  28.     wait_for_completion(&fwork.done); // 调用这个函数的线程睡眠等待在这里,直到work_list下挂接的kthread_flush_work中的kthread_flush_work_fn函数执行完  
  29. }  
【1】如何就flush了呢?看2.7的总结


2.7 总结的一张图:


        说了半天,其实woker和work的关系还是很难理解的,当我们经过一段时间,再次看的时候,难免还要花很长时间,因此,我画了一张图:

【1】worker中的task执行的是各自work中的func指定的函数,此规则同样适用于kthread_flush_work

【2】kthread_flush_work中的函数是kthread.c文件指定的函数,而kthread_work中的函数是用户自己定义的函数

【3】每次唤醒线程执行的work都是worker中的work_list下挂载的正常顺序的第一个

【4】如何实现等待全部的work都执行完呢?调用的是flush_kthread_worker函数中的wait_for_completion(&fwork.done);语句,只有当前边的work都执行完,才能轮到kthread_flush_work中的kthread_flush_work_fn的执行,此函数就是唤醒kthread_flush_work中的done。从而确定了前边的work都执行完了

403.879732: RNDIS_IPA NetDev was opened 403.891277: IPv6: ADDRCONF(NETDEV_CHANGE): rndis0: link becomes ready 403.891428: dwc3 4e00000.dwc3: Clr_TRB ring of ep1in 403.893656: RNDIS_IPA@rndis_ipa_start_xmit@1027@ctx:kworker/1:0: Missing pipe connected and/or iface up 403.908086: dwc3 4e00000.dwc3: gsi_prepare_trbs(): trb_pool:0000000000000000 trb_pool_dma:73c08000 403.908113: dwc3 4e00000.dwc3: 0: page_link:fffffffe00cf0202 offset:0 length:1000 address:0 403.908122: dwc3 4e00000.dwc3: gsi_prepare_trbs: Initialized TRB Ring for ep1in 403.908141: dwc3 4e00000.dwc3: XferRsc = 3 403.908152: dwc3 4e00000.dwc3: depcmd_laddr=4e0c83c last_trb_addr=8650 gevtcnt_laddr=4e0c42c gevtcnt_haddr=0 403.908168: dwc3 4e00000.dwc3: Clr_TRB ring of ep1out 403.912576: dwc3 4e00000.dwc3: gsi_prepare_trbs(): trb_pool:0000000000000000 trb_pool_dma:45224000 403.912603: dwc3 4e00000.dwc3: 0: page_link:fffffffe00148902 offset:0 length:1000 address:0 403.912611: dwc3 4e00000.dwc3: gsi_prepare_trbs: Initialized TRB Ring for ep1out 403.912630: dwc3 4e00000.dwc3: XferRsc = 2 403.912642: dwc3 4e00000.dwc3: depcmd_laddr=4e0c82c last_trb_addr=40f0 gevtcnt_laddr=4e0c41c gevtcnt_haddr=0 403.920331: Unable to handle kernel paging request at virtual address ffffffc013fce000 403.920349: Mem abort info: 403.920351: ESR = 0x96000047 403.920354: EC = 0x25: DABT (current EL), IL = 32 bits 403.920357: SET = 0, FnV = 0 403.920359: EA = 0, S1PTW = 0 403.920361: FSC = 0x07: level 3 translation fault 403.920364: Data abort info: 403.920365: ISV = 0, ISS = 0x00000047 403.920367: CM = 0, WnR = 1 403.920370: swapper pgtable: 4k pages, 39-bit VAs, pgdp=0000000042a8a000 403.920373: [ffffffc013fce000] pgd=10000000434a8003, p4d=10000000434a8003, pud=10000000434a8003, pmd=100000007b038003, pte=0000000000000000 403.920386: Internal error: Oops: 96000047 [#1] PREEMPT SMP 403.920599: Skip md ftrace buffer dump for: 0x1609e0 403.920604: Modules linked in: wlan(OE) msm_kgsl(OE) msm_drm(OE) ipa_clientsm(OE) camera(OE) 
rndisipam(OE) ipanetm(OE) rmnet_wlan(OE) icnss2(OE) msm_video(OE) cnss_nl(OE) cnss_prealloc(OE) ipam(OE) rmnet_core(OE) wlan_firmware_service(OE) ilitek2131(OE) bt_fm_slim(OE) msm_mmrm(OE) btpower(OE) cnss_utils(OE) gsim(OE) rmnet_ctl(OE) radio_i2c_rtc6226_qca(OE) usb_f_gsi(E) usb_f_qdss(E) landi_usb7016_i2c(E) va_macro_dlkm(OE) smcinvoke_dlkm(OE) machine_dlkm(OE) qcedev_mod_dlkm(OE) qcrypto_msm_dlkm(OE) rx_macro_dlkm(OE) wcd937x_dlkm(OE) tx_macro_dlkm(OE) tz_log_dlkm(OE) audio_pkt_dlkm(OE) wsa881x_analog_dlkm(OE) wcd9xxx_dlkm(OE) pinctrl_lpi_dlkm(OE) audio_prm_dlkm(OE) adsp_loader_dlkm(OE) hdcp_qseecom_dlkm(OE) q6_dlkm(OE) swr_ctrl_dlkm(OE) wcd_core_dlkm(OE) bolero_cdc_dlkm(OE) qce50_dlkm(OE) mbhc_dlkm(OE) wcd937x_slave_dlkm(OE) stub_dlkm(OE) swr_dlkm(OE) spf_core_dlkm(OE) gpr_dlkm(OE) snd_event_dlkm(OE) cfg80211(E) q6_notifier_dlkm(OE) msm_qmp(E) leds_gpio(E) 403.920680: leds_qpnp_vibrator_ldo(E) leds_qti_tri_led(E) qrtr_smd(E) qcom_q6v5_pas(E) qcom_pil_info(E) qcom_spss(E) qcom_q6v5(E) cpufreq_userspace(E) q6_pdr_dlkm(OE) qcom_spmi_adc5(E) snd_usb_audio_qmi(E) ipa_fmwk(E) icc_test(E) audpkt_ion_dlkm(OE) rpm_smd_cooling_device(E) cpufreq_ondemand(E) qcom_sysmon(E) qti_cpufreq_cdev(E) cx_ipeak_cdev(E) bcl_pmic5(E) qti_userspace_cdev(E) qcom_lpm(E) msm_tsens_driver(E) leds_qti_flash(E) qti_devfreq_cdev(E) regulator_cdev(E) qti_qmi_cdev(E) bcl_soc(E) qti_qmi_sensor(E) qcom_cpufreq_hw_debug(E) msm_lmh_dcvs(E) thermal_pause(E) qcom_spmi_temp_alarm(E) qcom_spmi_adc_tm5(E) cpu_hotplug(E) qcom_pon(E) qcom_smb1398_charger(E) lmh_cpu_vdd_cdev(E) qpnp_smb5_main(E) qcom_tsens(E) qcom_vadc_common(E) qcom_qpnp_qg(E) qti_battery_charger(E) rtc_pm8xxx(E) qcom_smb1355_charger(E) reboot_mode(E) i2c_msm_geni(E) gms_battery_prop(E) sgm7220(E) msm_sharedmem(E) frpc_adsprpc(E) pm8941_pwrkey(E) qpnp_pdphy(E) usb_f_ccid(E) ucsi_glink(E) 403.920748: usb_f_cdev(E) ehset(E) cx_ipeak(E) usb_f_diag(E) spmi_glink_debug(E) phy_qcom_emu(E) ld_class_drv(E) usb_bam(E) sg(E) lt6911_ver(E) 
dwc3_msm(E) spi_msm_geni(E) core_hang_detect(E) rpm_smd_debug(E) rq_stats(E) phy_msm_ssusb_qmp(E) apex_drv(E) f_fs_ipc_log(E) smp2p_sleepstate(E) gpucc_khaje(E) memlat(E) pinctrl_spmi_gpio(E) phy_msm_snps_hs(E) clk_rpm(E) aw9523b_key(E) mdt_loader(E) leds_aw99703(E) boot_stats(E) qcom_i2c_pmic(E) hung_task_enh(E) eud(E) qcom_iommu_debug(E) phy_generic(E) qcom_aoss(E) rpm_master_stat(E) msm_geni_serial(E) qcom_ramdump(E) altmode_glink(E) ch341(E) qfprom_sys(E) qcom_rpm(E) debugcc_khaje(E) sps_drv(E) rdbg(E) phy_qcom_ufs_qmp_v4(E) msm_memshare(E) liandi_gpiodemo(E) sub_lcd_lt9721(E) panel_event_notifier(E) guestvm_loader(E) ch34x(E) charger_ulog_glink(E) qcom_va_minidump(E) qti_battery_debug(E) phy_xgene(E) cdsprm(E) bwmon(E) msm_performance(E) nxpnfc_i2c(E) qcom_dcvs(E) cdsp_loader(E) msm_gpi(E) glink_pkt(E) 403.920826: landi_event_notifier(E) heap_mem_ext_v01(E) qcom_pm8008_regulator(E) dispcc_khaje(E) pinctrl_spmi_mpp(E) refgen(E) pmic_glink(E) pwm_qti_lpg(E) pinctrl_aw9523b(E) phy_qcom_ufs_qmp_v4_waipio(E) phy_qcom_ufs_qmp_v4_kalama(E) msm_sysstats(E) zram zsmalloc slim_qcom_ngd_ctrl(E) glink_probe(E) pdr_interface(E) rproc_qcom_common(E) qmi_helpers(E) ufs_qcom(E) sdhci_msm(E) qrtr(E) qcom_glink_smem(E) qcom_glink_rpm(E) qcom_glink_spss(E) arm_smmu(E) clk_smd_rpm(E) bam_dma(E) clk_dummy(E) cqhci(E) gcc_khaje(E) mem_buf(E) ns(E) phy_qcom_ufs_qmp_v4_khaje(E) qcom_dma_heaps(E) qseecom_dlkm(OE) phy_qcom_ufs_qrbtc_sdm845(E) qcom_glink(E) qnoc_bengal(E) qcom_soc_wdt(E) smp2p(E) usbpd(E) ufshcd_crypto_qti(E) clk_qcom(E) crypto_qti_common(E) icc_rpm(E) dcc_v2(E) mem_offline(E) mem_buf_dev(E) memory_dump_v2(E) qcom_iommu_util(E) pinctrl_bengal(E) pinctrl_khaje(E) qcom_ipc_logging(E) phy_qcom_ufs(E) msm_poweroff(E) rpm_smd_regulator(E) qcom_logbuf_vh(E) qcom_wdt_core(E) 403.920897: sched_walt(E) crypto_qti_tz(E) gdsc_regulator(E) minidump(E) pinctrl_msm(E) qcom_smd(E) rpm_smd(E) qcom_spmi_pmic(E) socinfo(E) secure_buffer(E) stub_regulator(E) debug_regulator(E) 
soc_sleep_stats(E) icc_debug(E) iommu_logger(E) msm_dma_iommu_mapping(E) nvmem_qcom_spmi_sdam(E) nvmem_qfprom(E) proxy_consumer(E) qcom_apcs_ipc_mailbox(E) smem(E) qcom_mpm(E) qcom_cpu_vendor_hooks(E) qcom_cpufreq_hw(E) qcom_pmu_lib(E) qcom_scm(E) qpnp_power_on(E) qnoc_qos_rpm(E) qrng_dlkm(OE) qti_regmap_debugfs(E) slimbus(E) regmap_spmi(E) qcom_hwspinlock(E) spmi_pmic_arb(E) 403.920942: CPU: 7 PID: 12183 Comm: kworker/u16:13 Tainted: G W OE 5.15.168-android13-8-gc3eb426f91e1-dirty #1 403.920947: Hardware name: Qualcomm Technologies, Inc. KHAJE IDP nopmi (DT) 403.920951: Workqueue: k_ipa_usb ipa_work_handler.cfi_jt [usb_f_gsi] 403.920978: pstate: 20400005 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) 403.920983: pc : av8l_fast_map_pages+0xc8/0x128 [qcom_iommu_util] 403.921004: lr : arm_smmu_map_pages+0xcc/0x260 [arm_smmu] 403.921041: sp : ffffffc024df3680 403.921043: x29: ffffffc024df3680 x28: ffffffdf62a620f0 x27: ffffff803c173f68 403.921049: x26: 00000000f9c00000 x25: 0000000000000003 x24: ffffffc024df3740 403.921054: x23: 0000000000000a20 x22: ffffff80361c3200 x21: 00000000f9c00000 403.921059: x20: 00000000000c8000 x19: ffffffc024df3740 x18: ffffffc0136c7078 403.921064: x17: ffffffc013fce008 x16: 00000000000c8000 x15: 00000000000000c8 403.921069: x14: 00000000000000c8 x13: 00000000f9c00000 x12: 0060000000000e43 403.921074: x11: 00000000000000c8 x10: 00000000000f9c00 x9 : ffffff803c173f00 403.921079: x8 : ffffffc013800000 x7 : ffffffc024df3740 x6 : 0000000000000a20 403.921084: x5 : 0000000000000003 x4 : 00600000f9c00e43 x3 : 00600000f9c01e43 403.921089: x2 : 00000000000000c6 x1 : ffffffc013fce000 x0 : 00000000f9c02000 403.921094: Call trace: 403.921097: av8l_fast_map_pages+0xc8/0x128 [qcom_iommu_util] 403.921116: arm_smmu_map_pages+0xcc/0x260 [arm_smmu] 403.921145: __iommu_map+0x180/0x3b4 403.921153: iommu_map+0x34/0x94 403.921157: ipa3_iommu_map+0x41c/0x75c [ipam] 403.921383: ipa3_smmu_map_peer_buff+0x1a4/0x6bc [ipam] 403.921591: 
ipa3_usb_smmu_map_xdci_channel+0x158/0x414 [ipa_clientsm] 403.921613: ipa3_usb_request_xdci_channel+0x46c/0x9a8 [ipa_clientsm] 403.921630: ipa_usb_xdci_connect_internal+0x2ac/0xe28 [ipa_clientsm] 403.921648: ipa_usb_xdci_connect+0x3c/0xd0 [ipa_fmwk] 403.921690: ipa_connect_channels+0x3c0/0x7c4 [usb_f_gsi] 403.921711: ipa_work_handler+0x7f8/0xbb4 [usb_f_gsi] 403.921730: process_one_work+0x254/0x5a0 403.921737: worker_thread+0x398/0x790 403.921741: kthread+0x168/0x1dc 403.921745: ret_from_fork+0x10/0x20 403.921751: Code: aa000184 aa030183 f1000842 91400800 (a93f8e24) 403.921755: ---[ end trace 40473a25e6f92a4e ]--- 403.921760: Kernel panic - not syncing: Oops: Fatal exception 403.921763: SMP: stopping secondary CPUs 403.921773: VendorHooks: CPU3: stopping 403.921789: CPU: 3 PID: 1406 Comm: kworker/u17:3 Tainted: G D W OE 5.15.168-android13-8-gc3eb426f91e1-dirty #1 403.921797: Hardware name: Qualcomm Technologies, Inc. KHAJE IDP nopmi (DT) 403.921804: Workqueue: fsverity_read_queue f2fs_verify_bio.cfi_jt 403.921818: pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) 403.921825: pc : super_cache_count+0xf4/0x244 403.921834: lr : super_cache_count+0x6c/0x244 403.921841: sp : ffffffc00f9eb390
08-02
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值