This post discusses why, on Android, a custom initlogo.rle placed in the root directory displays correctly on the first boot but cannot be found on the second boot. The cause is an unlink operation performed on initlogo.rle in init.c (via logo.c); the fix is to comment out the relevant code so the file is not deleted.

Source: http://hi.baidu.com/supertreeman/blog/item/f96ea8119ff3b64ff919b847.html

On Android you may run into the following problem: you put your own initlogo.rle in the root directory, the image shows on the first boot, but from the second boot on it no longer appears and init complains that initlogo.rle is missing:

    [   30.288271] init: cannot open '/initlogo.rle'
    [   31.272972] enabling adb
    [   31.273105] Enabled => usb_mass_storage
    [   31.273141] Enabled => adb
    [   31.293419] adb_open
    / # [   33.201290] PVR_K:(Warning): SysFinalise: Version string: SGX540 S5PC110]
    [   36.211978] s3c-nand: 1 bit error detected at byte 197, correcting from 0x66K
    [   36.554264] s3c_idma_preallocate_buffer:  VA-e0940000  PA-C0000000  163840bys
    [   36.554818] asoc: WM8960 <-> s5pc1xx-i2s mapping ok
    [   37.391157] Compat-wireless backport release: compat-wireless-2011-06-28-2-FA
    [   37.391225] Backport based on linux-next.git next-20110707
    [   37.516713] cfg80211: Calling CRDA to update world regulatory domain
    [   37.605989] libertas_sdio: Libertas SDIO driver
    [   37.606037] libertas_sdio: Copyright Pierre Ossman
    [   67.960213] warning: `zygote' uses 32-bit capabilities (legacy support in us)
    [  132.503258] request_suspend_state: wakeup (3->0) at 131967833628 (2011-01-05)
    [  132.506383] init: untracked pid 57 exited
    [  132.506456] init: untracked pid 63 exited


A look at the code shows that in system/core/init/init.c, load_565rle_image() (implemented in logo.c) calls unlink on initlogo.rle after the image has been displayed. With Android's default read-only system.img this is harmless, but with a custom writable system.img in yaffs or ubi form it is a problem: initlogo.rle actually gets deleted, so the file cannot be found on the next boot. The problem can be avoided by commenting out the unlink(fn) statement in load_565rle_image() in logo.c.


#define pr_fmt(fmt) "gpio-privacy: " fmt #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/mutex.h> #include <linux/of_irq.h> #include <linux/of_gpio.h> #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/input.h> #define DEFAULT_DEBOUNCE_INTERVAL 5 enum privacy_state { PRIVACY_STATE_OFF = 0, /* HW privacy is OFF */ PRIVACY_STATE_ON, /* HW privacy is ON */ }; struct privacy_state_warning_event { const char *desc; unsigned int input_type; unsigned int code; unsigned int input_value; struct input_dev *input_dev; struct work_struct work; struct privacy_priv *priv; }; struct privacy_state_event { const char *desc; unsigned int input_type; unsigned int code; struct input_dev *input_dev; struct work_struct work; int state_gpio; enum of_gpio_flags state_gpio_flags; struct privacy_priv *priv; }; struct privacy_button_event { const char *desc; unsigned int code; unsigned int input_type; int debounce_interval; struct input_dev *input_dev; struct delayed_work work; bool wakeup_capable; int button_gpio; enum of_gpio_flags button_gpio_flags; int last_button_event; unsigned long last_button_press_time; unsigned long last_button_release_time; struct privacy_priv *priv; }; struct privacy_priv { int enable_gpio; enum of_gpio_flags enable_gpio_flags; int enable_gpio_toggle_duration; int privacy_event_max_press_duration; int auto_toggle_enable_gpio_time; bool is_desired_privacy_state_on; struct mutex mutex; struct privacy_state_warning_event *state_warning_event; struct privacy_state_event *state_event; struct privacy_button_event *button_event; struct delayed_work work; }; /* Forward declarations: */ static enum privacy_state __privacy_state(struct privacy_priv *priv); static int __set_privacy_enable(struct privacy_priv *priv); static void handle_privacy_button_event(struct privacy_button_event *button_event, bool button_pressed, bool do_toggle) { enum privacy_state cur_state; struct privacy_priv *priv = button_event->priv; struct privacy_state_warning_event *state_warning_event = priv->state_warning_event; unsigned long button_press_duration; unsigned long last_button_press_time; bool is_desired_privacy_state_on; cur_state = __privacy_state(priv); if (button_pressed) { mutex_lock(&priv->mutex); button_event->last_button_press_time = jiffies; /* With the Silego chip, we only know whether we are entering * privacy state on the button press because on button press * Silego locks the current state, so save this off because * we will need this info on button release. */ if (cur_state == PRIVACY_STATE_OFF) priv->is_desired_privacy_state_on = true; mutex_unlock(&priv->mutex); pr_debug("%s: privacy button PRESSED cur_state=%d\n", __func__, cur_state); return; } pr_debug("%s: privacy button RELEASED cur_state=%d\n", __func__, cur_state); /* privacy button released ! 
*/ mutex_lock(&priv->mutex); button_event->last_button_release_time = jiffies; last_button_press_time = button_event->last_button_press_time; is_desired_privacy_state_on = priv->is_desired_privacy_state_on; priv->is_desired_privacy_state_on = false; mutex_unlock(&priv->mutex); if (priv->privacy_event_max_press_duration > 0) { button_press_duration = last_button_press_time + msecs_to_jiffies(priv->privacy_event_max_press_duration); /* Ignore long press because it might be for Power Event or Factory Reset */ if (time_after(jiffies, button_press_duration)) { pr_debug("%s: POWER EVENT: cur_state=%d\n", __func__, cur_state); is_desired_privacy_state_on = false; } } if (is_desired_privacy_state_on && do_toggle) { if (state_warning_event != NULL) { pr_debug("%s: state_warning schedule work: cur_state=%d\n", __func__, cur_state); schedule_work(&state_warning_event->work); } if (priv->auto_toggle_enable_gpio_time >= 0) { pr_info("%s: auto-toggle-enable schedule work\n", __func__); schedule_delayed_work(&priv->work, msecs_to_jiffies(priv->auto_toggle_enable_gpio_time)); } } } static void privacy_work_func(struct work_struct *work) { struct privacy_priv *priv = container_of(work, struct privacy_priv, work.work); mutex_lock(&priv->mutex); pr_info("%s: privacy state enable immediately\n", __func__); __set_privacy_enable(priv); mutex_unlock(&priv->mutex); } static void privacy_state_warning_event_work_func(struct work_struct *work) { struct privacy_state_warning_event *state_warning_event = container_of(work, struct privacy_state_warning_event, work); pr_info("%s: sending state warning to userspace. input_type=0x%x code=0x%x\n", __func__, state_warning_event->input_type, state_warning_event->code); if (state_warning_event->input_type == EV_KEY || state_warning_event->input_type == EV_SW) { /* * EV_KEY key events and EV_SW switch events require a full transition from 0 to 1 * and then 1 to 0 in order to get future events */ input_event(state_warning_event->input_dev, state_warning_event->input_type, state_warning_event->code, 1); input_sync(state_warning_event->input_dev); input_event(state_warning_event->input_dev, state_warning_event->input_type, state_warning_event->code, 0); input_sync(state_warning_event->input_dev); } else if (state_warning_event->input_type == EV_MSC || state_warning_event->code == MSC_RAW) { /* * EV_MSC events only send a single event with &#39;input value&#39; from dts */ input_event(state_warning_event->input_dev, state_warning_event->input_type, state_warning_event->code, state_warning_event->input_value); input_sync(state_warning_event->input_dev); } } static void privacy_state_event_work_func(struct work_struct *work) { bool value; struct privacy_state_event *state_event = container_of(work, struct privacy_state_event, work); value = gpio_get_value_cansleep(state_event->state_gpio); if (state_event->state_gpio_flags & OF_GPIO_ACTIVE_LOW) value = !value; input_event(state_event->input_dev, state_event->input_type, state_event->code, value); input_sync(state_event->input_dev); } static void privacy_button_event_work_func(struct work_struct *work) { int value; struct privacy_button_event *button_event = container_of(work, struct privacy_button_event, work.work); value = gpio_get_value_cansleep(button_event->button_gpio); if (unlikely(value < 0)) { /* * gpio read can fail, however we should report button * press in order to notify userspace that privacy * state has been changed. 
force it to * !button_event->last_button_event for that case in the hope * we just missed one press or release. */ pr_warn_ratelimited("gpio-privacy: gpio %d read failed=%d\n", button_event->button_gpio, value); value = !button_event->last_button_event; } else if (button_event->button_gpio_flags & OF_GPIO_ACTIVE_LOW) { value = !value; } if (button_event->last_button_event == value) { /* * We can reach here when : * 1) previous press/release has been canceled due to * debouce interval. * 2) gpio_get_value() failed. * 3) button is pressed and released then we got irqs together. * * We should report button press by all means in order for * userspace to be notified about new privacy mode change. * Thus send out an artificial event. * * Unlike the mute enable case, mute disable takes no delay * to complete the mode switching. Thus if the mute button is * already released, read of the current mute status gives us * the newly switched status. In this case, we shouldn&#39;t read * the current privacy status and toggle. */ handle_privacy_button_event(button_event, !value, false); input_event(button_event->input_dev, button_event->input_type, button_event->code, !value); input_sync(button_event->input_dev); } else { button_event->last_button_event = value; } handle_privacy_button_event(button_event, value, true); input_event(button_event->input_dev, button_event->input_type, button_event->code, value); input_sync(button_event->input_dev); if (button_event->wakeup_capable) pm_relax(button_event->input_dev->dev.parent); } static irqreturn_t privacy_state_interrupt(int irq, void *arg) { struct privacy_state_event *state_event = arg; schedule_work(&state_event->work); return IRQ_HANDLED; } static irqreturn_t privacy_button_interrupt(int irq, void *arg) { struct privacy_button_event *button_event = arg; if (button_event->wakeup_capable) pm_stay_awake(button_event->input_dev->dev.parent); cancel_delayed_work(&button_event->work); schedule_delayed_work(&button_event->work, msecs_to_jiffies(button_event->debounce_interval)); return IRQ_HANDLED; } static int privacy_request_interrupts(struct platform_device *pdev) { int ret; struct privacy_priv *priv = platform_get_drvdata(pdev); struct privacy_state_event *state_event = priv->state_event; struct privacy_button_event *button_event = priv->button_event; ret = devm_request_any_context_irq(&pdev->dev, gpio_to_irq(state_event->state_gpio), privacy_state_interrupt, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "gpio-privacy-state", state_event); if (ret < 0) return ret; ret = devm_request_any_context_irq(&pdev->dev, gpio_to_irq(button_event->button_gpio), privacy_button_interrupt, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "gpio-privacy", button_event); if (ret < 0) return ret; return 0; } static int privacy_setup_state_warning_event(struct platform_device *pdev) { int ret; struct input_dev *input; struct device *dev = &pdev->dev; struct privacy_priv *priv = platform_get_drvdata(pdev); /* state_warning_event is optional dts node */ if (priv->state_warning_event == NULL) return 0; INIT_WORK(&priv->state_warning_event->work, privacy_state_warning_event_work_func); input = devm_input_allocate_device(dev); if (!input) return -ENOMEM; input->name = "gpio-privacy-state-warning"; input->dev.parent = &pdev->dev; input_set_capability(input, priv->state_warning_event->input_type, priv->state_warning_event->code); priv->state_warning_event->input_dev = input; priv->state_warning_event->priv = priv; ret = input_register_device(input); if (ret) return ret; return 0; } static 
int privacy_setup_state_event(struct platform_device *pdev) { int ret; struct input_dev *input; struct device *dev = &pdev->dev; struct privacy_priv *priv = platform_get_drvdata(pdev); INIT_WORK(&priv->state_event->work, privacy_state_event_work_func); input = devm_input_allocate_device(dev); if (!input) return -ENOMEM; input->name = "gpio-privacy-state"; input->dev.parent = &pdev->dev; input_set_capability(input, priv->state_event->input_type, priv->state_event->code); priv->state_event->input_dev = input; priv->state_event->priv = priv; ret = input_register_device(input); if (ret) return ret; /* seed initial value if already in a muted state */ if (priv->state_event->input_type == EV_SW && __privacy_state(priv)) { input_event(priv->state_event->input_dev, priv->state_event->input_type, priv->state_event->code, 1); input_sync(priv->state_event->input_dev); } return 0; } static int privacy_setup_button_event(struct platform_device *pdev) { int ret; struct input_dev *input; struct device *dev = &pdev->dev; struct privacy_priv *priv = platform_get_drvdata(pdev); INIT_DELAYED_WORK(&priv->button_event->work, privacy_button_event_work_func); input = devm_input_allocate_device(dev); if (!input) return -ENOMEM; input->name = "gpio-privacy-button"; input->dev.parent = &pdev->dev; input_set_capability(input, priv->button_event->input_type, priv->button_event->code); priv->button_event->input_dev = input; priv->button_event->priv = priv; ret = input_register_device(input); if (ret) return ret; return 0; } #ifdef CONFIG_OF static int privacy_state_warning_event_parse_of(struct platform_device *pdev) { struct device_node *node; struct device_node *state_warning_event_node; struct privacy_priv *priv = platform_get_drvdata(pdev); node = pdev->dev.of_node; state_warning_event_node = of_get_child_by_name(node, "state_warning_event"); if (!state_warning_event_node) { /* state warning event is optional in dts */ dev_warn(&pdev->dev, "No state warning event configured in dts\n"); return 0; } priv->state_warning_event = devm_kzalloc(&pdev->dev, sizeof(*priv->state_warning_event), GFP_KERNEL); if (!priv->state_warning_event) return -ENOMEM; priv->state_warning_event->desc = of_get_property(state_warning_event_node, "label", NULL); if (of_property_read_u32(state_warning_event_node, "linux,input-type", &priv->state_warning_event->input_type)) priv->state_warning_event->input_type = EV_KEY; if (of_property_read_u32(state_warning_event_node, "linux,code", &priv->state_warning_event->code)) return -EINVAL; if (priv->state_warning_event->input_type == EV_MSC && priv->state_warning_event->code == MSC_RAW) { if (of_property_read_u32(state_warning_event_node, "linux,input-value", &priv->state_warning_event->input_value)) return -EINVAL; } return 0; } static int privacy_state_event_parse_of(struct platform_device *pdev) { int ret; enum of_gpio_flags flags; struct device_node *node; struct device_node *state_event_node; struct privacy_priv *priv = platform_get_drvdata(pdev); node = pdev->dev.of_node; state_event_node = of_get_child_by_name(node, "state_event"); if (!state_event_node) { dev_err(&pdev->dev, "No state event configured in dts\n"); return -EINVAL; } priv->state_event = devm_kzalloc(&pdev->dev, sizeof(*priv->state_event), GFP_KERNEL); if (!priv->state_event) return -ENOMEM; priv->state_event->desc = of_get_property(state_event_node, "label", NULL); if (of_property_read_u32(state_event_node, "linux,input-type", &priv->state_event->input_type)) priv->state_event->input_type = EV_KEY; if 
(of_property_read_u32(state_event_node, "linux,code", &priv->state_event->code)) return -EINVAL; priv->state_event->state_gpio = of_get_gpio_flags(state_event_node, 0, &flags); if (!gpio_is_valid(priv->state_event->state_gpio)) { dev_err(&pdev->dev, "No state gpios configured in dts\n"); return -EINVAL; } priv->state_event->state_gpio_flags = flags; ret = devm_gpio_request_one(&pdev->dev, priv->state_event->state_gpio, GPIOF_IN, "privacy-state-gpio"); if (ret) return ret; dev_info(&pdev->dev, "state gpio %d configured.\n", priv->state_event->state_gpio); return 0; } static int privacy_button_event_parse_of(struct platform_device *pdev) { int ret; enum of_gpio_flags flags; struct device_node *node; struct device_node *button_event_node; struct privacy_priv *priv = platform_get_drvdata(pdev); node = pdev->dev.of_node; button_event_node = of_get_child_by_name(node, "button_event"); if (!button_event_node) { dev_err(&pdev->dev, "No button event configured in dts\n"); return -EINVAL; } priv->button_event = devm_kzalloc(&pdev->dev, sizeof(*priv->button_event), GFP_KERNEL); if (!priv->button_event) return -ENOMEM; priv->button_event->desc = of_get_property(button_event_node, "label", NULL); if (of_property_read_u32(button_event_node, "linux,input-type", &priv->button_event->input_type)) priv->button_event->input_type = EV_KEY; if (of_property_read_u32(button_event_node, "linux,code", &priv->button_event->code)) return -EINVAL; if (of_property_read_u32(button_event_node, "debounce-interval", &priv->button_event->debounce_interval)) priv->button_event->debounce_interval = DEFAULT_DEBOUNCE_INTERVAL; priv->button_event->button_gpio = of_get_gpio_flags(button_event_node, 0, &flags); if (!gpio_is_valid(priv->button_event->button_gpio)) { dev_err(&pdev->dev, "No button gpios configured in dts\n"); return -EINVAL; } priv->button_event->button_gpio_flags = flags; ret = devm_gpio_request_one(&pdev->dev, priv->button_event->button_gpio, GPIOF_IN, "privacy-button-gpio"); if (ret) return ret; dev_info(&pdev->dev, "button gpio %d configured.\n", priv->button_event->button_gpio); priv->button_event->wakeup_capable = of_property_read_bool(button_event_node, "wakeup-source"); return 0; } static int privacy_parse_of(struct platform_device *pdev) { enum of_gpio_flags flags; int gpio, ret, gpio_init_val; struct privacy_priv *priv = platform_get_drvdata(pdev); gpio = of_get_named_gpio_flags(pdev->dev.of_node, "enable-gpio", 0, &flags); if (!gpio_is_valid(gpio)) { dev_err(&pdev->dev, "No enable gpio configured in dts\n"); return -EPROBE_DEFER; } if (flags & OF_GPIO_ACTIVE_LOW) gpio_init_val = GPIOF_OUT_INIT_HIGH; else gpio_init_val = GPIOF_OUT_INIT_LOW; ret = devm_gpio_request_one(&pdev->dev, gpio, gpio_init_val, "privacy-enable-gpio"); if (ret) return ret; priv->enable_gpio = gpio; priv->enable_gpio_flags = flags; priv->is_desired_privacy_state_on = false; if (of_property_read_u32(pdev->dev.of_node, "enable-gpio-toggle-duration", &priv->enable_gpio_toggle_duration)) priv->enable_gpio_toggle_duration = 0; if (of_property_read_u32(pdev->dev.of_node, "auto-toggle-enable-gpio-time", &priv->auto_toggle_enable_gpio_time)) priv->auto_toggle_enable_gpio_time = -1; if (of_property_read_u32(pdev->dev.of_node, "privacy-event-max-press-duration", &priv->privacy_event_max_press_duration)) priv->privacy_event_max_press_duration = 0; if (priv->auto_toggle_enable_gpio_time >= 0) INIT_DELAYED_WORK(&priv->work, privacy_work_func); return 0; } #else static int privacy_button_event_parse_of(struct platform_device *pdev) { return 
-EINVAL; } static int privacy_parse_of(struct platform_device *pdev) { return -EINVAL; } #endif static enum privacy_state __privacy_state(struct privacy_priv *priv) { struct privacy_state_event *state_event = priv->state_event; int value = gpio_get_value_cansleep(state_event->state_gpio); if ((!value && state_event->state_gpio_flags & OF_GPIO_ACTIVE_LOW) || (value && !(state_event->state_gpio_flags & OF_GPIO_ACTIVE_LOW))) /* return true when privacy state is on */ return PRIVACY_STATE_ON; return PRIVACY_STATE_OFF; } static int __set_privacy_enable(struct privacy_priv *priv) { int i = 0; int value = 1; /* default to 1, active high, unless proven otherwise */ const int max_wait = 100; pr_info("%s: Enter\n", __func__); if (priv->enable_gpio_flags & OF_GPIO_ACTIVE_LOW) value = 0; gpio_set_value_cansleep(priv->enable_gpio, value); if (priv->enable_gpio_toggle_duration > 0) { /* * toggle enable_gpio for specified duration but do not * wait for privacy enabled */ if (priv->enable_gpio_toggle_duration < 20) usleep_range((priv->enable_gpio_toggle_duration * 1000), (priv->enable_gpio_toggle_duration * 1000) + 100); else msleep(priv->enable_gpio_toggle_duration); } else { /* * wait for privacy enabled for up to 100ms or when * privacy state is set (which ever comes first) */ while (i < max_wait) { if (__privacy_state(priv)) break; usleep_range(1000, 1100); i++; } } gpio_set_value_cansleep(priv->enable_gpio, !value); pr_info("%s: Leave\n", __func__); if (i == max_wait) return -ETIMEDOUT; return 0; } static enum privacy_state privacy_state(struct device *dev) { enum privacy_state cur_state; struct platform_device *pdev = to_platform_device(dev); struct privacy_priv *priv = platform_get_drvdata(pdev); mutex_lock(&priv->mutex); cur_state = __privacy_state(priv); mutex_unlock(&priv->mutex); return cur_state; } static ssize_t show_privacy_state(struct device *dev, struct device_attribute *attr, char *buf) { enum privacy_state state = privacy_state(dev); return snprintf(buf, PAGE_SIZE, "%d\n", state); } static int set_privacy_enable(struct device *dev) { int ret = 0; struct platform_device *pdev = to_platform_device(dev); struct privacy_priv *priv = platform_get_drvdata(pdev); mutex_lock(&priv->mutex); pr_info("%s: privacy state enable immediately\n", __func__); __set_privacy_enable(priv); mutex_unlock(&priv->mutex); return ret; } static ssize_t store_privacy_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int enable, ret; if (!kstrtoint(buf, 10, &enable)) { /* * Don&#39;t allow userspace to turn off Privacy Mode because * privacy hardware circuit won&#39;t allow it. 
*/ if (enable == PRIVACY_STATE_OFF) return -EINVAL; ret = set_privacy_enable(dev); if (ret) return ret; } else { return -EINVAL; } return count; } static DEVICE_ATTR(enable, S_IWUSR | S_IWGRP, NULL, store_privacy_enable); static DEVICE_ATTR(state, S_IRUGO, show_privacy_state, NULL); static struct attribute *gpio_privacy_attrs[] = { &dev_attr_enable.attr, &dev_attr_state.attr, NULL, }; static struct attribute_group gpio_privacy_attr_group = { .attrs = gpio_privacy_attrs, }; static int gpio_privacy_probe(struct platform_device *pdev) { int ret; struct privacy_priv *priv; struct device *dev = &pdev->dev; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; mutex_init(&priv->mutex); platform_set_drvdata(pdev, priv); ret = privacy_parse_of(pdev); if (ret) { pr_err("failed to parse device tree = %d\n", ret); return ret; } ret = privacy_state_warning_event_parse_of(pdev); if (ret) { pr_err("failed to parse state warning event device tree = %d\n", ret); return ret; } ret = privacy_state_event_parse_of(pdev); if (ret) { pr_err("failed to parse state event device tree = %d\n", ret); return ret; } ret = privacy_button_event_parse_of(pdev); if (ret) { pr_err("failed to parse button event device tree = %d\n", ret); return ret; } ret = privacy_setup_state_warning_event(pdev); if (ret) { pr_err("failed to setup state warning event = %d\n", ret); return ret; } ret = privacy_setup_state_event(pdev); if (ret) { pr_err("failed to setup state event = %d\n", ret); return ret; } ret = privacy_setup_button_event(pdev); if (ret) { pr_err("failed to setup button event = %d\n", ret); return ret; } ret = privacy_request_interrupts(pdev); if (ret) { pr_err("failed to request interrupt = %d\n", ret); return ret; } ret = sysfs_create_group(&dev->kobj, &gpio_privacy_attr_group); if (ret) { pr_err("failed to create sysfs group = %d\n", ret); return ret; } device_init_wakeup(&pdev->dev, priv->button_event->wakeup_capable); return 0; } static int gpio_privacy_remove(struct platform_device *pdev) { struct privacy_priv *priv; struct device *dev = &pdev->dev; struct privacy_state_warning_event *state_warning_event; struct privacy_state_event *state_event; struct privacy_button_event *button_event; priv = platform_get_drvdata(pdev); state_warning_event = priv->state_warning_event; state_event = priv->state_event; button_event = priv->button_event; if (priv->auto_toggle_enable_gpio_time >= 0) cancel_delayed_work_sync(&priv->work); if (state_warning_event != NULL) cancel_work_sync(&state_warning_event->work); cancel_work_sync(&state_event->work); cancel_delayed_work_sync(&button_event->work); sysfs_remove_group(&dev->kobj, &gpio_privacy_attr_group); if (state_warning_event != NULL) pm_relax(state_warning_event->input_dev->dev.parent); pm_relax(state_event->input_dev->dev.parent); pm_relax(button_event->input_dev->dev.parent); mutex_destroy(&priv->mutex); return 0; } #ifdef CONFIG_PM_SLEEP static int gpio_privacy_suspend(struct device *dev) { struct privacy_priv *priv; struct privacy_state_event *state_event; struct privacy_button_event *button_event; struct platform_device *pdev = to_platform_device(dev); priv = platform_get_drvdata(pdev); state_event = priv->state_event; button_event = priv->button_event; if (button_event->wakeup_capable) { int error; enable_irq_wake(gpio_to_irq(button_event->button_gpio)); error = irq_set_irq_type(gpio_to_irq(button_event->button_gpio), IRQ_TYPE_EDGE_BOTH); if (error) { pr_err("%s: failed to set wakeup trigger for gpio-privacy, err=%d\n", __func__, error); 
disable_irq_wake(gpio_to_irq(button_event->button_gpio)); return error; } } return 0; } static int gpio_privacy_resume(struct device *dev) { struct privacy_priv *priv; struct privacy_state_event *state_event; struct privacy_button_event *button_event; struct platform_device *pdev = to_platform_device(dev); priv = platform_get_drvdata(pdev); state_event = priv->state_event; button_event = priv->button_event; if (button_event->wakeup_capable) { int error; error = irq_set_irq_type(gpio_to_irq(button_event->button_gpio), IRQ_TYPE_EDGE_BOTH); if (error) pr_err("%s: failed to restore interrupt trigger gpio-privacy, err=%d\n", __func__, error); disable_irq_wake(gpio_to_irq(button_event->button_gpio)); } return 0; } #endif #ifdef CONFIG_OF static const struct of_device_id privacy_of_table[] = { { .compatible = "gpio-privacy", }, { }, }; MODULE_DEVICE_TABLE(of, privacy_of_table); #endif #ifdef CONFIG_PM_SLEEP static SIMPLE_DEV_PM_OPS(gpio_privacy_pm_ops, gpio_privacy_suspend, gpio_privacy_resume); #endif static struct platform_driver gpio_privacy_driver = { .driver = { .name = "gpio-privacy", #ifdef CONFIG_PM_SLEEP .pm = &gpio_privacy_pm_ops, #endif .of_match_table = of_match_ptr(privacy_of_table), }, .probe = gpio_privacy_probe, .remove = gpio_privacy_remove, }; static int __init gpio_privacy_init(void) { return platform_driver_register(&gpio_privacy_driver); } static void __exit gpio_privacy_exit(void) { platform_driver_unregister(&gpio_privacy_driver); } module_init(gpio_privacy_init); module_exit(gpio_privacy_exit); MODULE_LICENSE("GPL"); how to enable pr_debug ?
08-16
/* * Copyright (c) 2018 Actions Semiconductor Co., Ltd * * SPDX-License-Identifier: Apache-2.0 */ /** * @brief PWM controller driver for Actions SoC */ #include <errno.h> #include <sys/__assert.h> #include <stdbool.h> #include <kernel.h> #include <device.h> #include <init.h> #include <drivers/pwm.h> #include <soc.h> #include <drivers/dma.h> #include <errno.h> #include <soc_regs.h> #include "pwm_context.h" #include <drivers/cfg_drv/dev_config.h> #include <soc.h> #define LOG_LEVEL CONFIG_LOG_PWM_DEV_LEVEL #include <logging/log.h> LOG_MODULE_REGISTER(pwm); #define DMA_IRQ_TC (0) /* DMA completion flag */ #define DMA_IRQ_HF (1) /* DMA half-full flag */ enum PWM_GROUP { PWM_GROUP0_REG, PWM_GROUP1_REG, PWM_GROUP2_REG, PWM_GROUP3_REG, PWM_GROUP4_REG, PWM_GROUP5_REG, PWM_GROUP_MAX, }; #define PWM_FIFO_REG (6) #define PWM_IR_REG (7) #define PWM_INTCTL_REG (8) #define PWM_PENDING_REG (9) enum PWM_MODE { PWM_DEFAULT_REG, PWM_FIX_INIT, PWM_BTH_INIT, PWM_PRG_INIT, PWM_IR_INIT, PWM_MODE_MAX, }; #define PWM_MODE_MASK (0x7) #define PWM_chan(x) (1 << (3 + x)) #define PWM_chan_act(x) (1 << (9 + x)) #define PWM_chan_act_MASK (0x7e00) #define ir_code_pre_sym(a) ((a&0x8000) >> 15) #define ir_code_pre_val(a) ((a&0x7f00) >> 8) #define ir_code_pos_sym(a) ((a&0x80) >> 7) #define ir_code_pos_val(a) ((a&0x7f) >> 0) #define PWM_IR_REPEAT_MODE (0 << 8) #define PWM_IR_CYCLE_MODE (1 << 8) #define PWM_IR_MASK (0xff) #define PWM_IR_TX_MARGIN (1000) #define PWM_IR_TIMEOUT (1) /* IR_RX_ANA_CTL */ #define TX_ANA_EN (1) #define RX_ANA_CTL(base) (base + 0xf0) #define TX_ANA_CTL(base) (base + 0xf4) #define IR_TX_DINV (1 << 8) #define IR_TX_SR(X) (X << 4) #define IR_TX_POUT(X) (X << 1) #define IR_TX_EN (1) struct pwm_acts_data { struct k_sem dma_sync; struct k_sem ir_sync; struct k_sem ir_transfer_sync; struct device *dma_dev; int dma_chan; int (*program_callback)(void *cb_data, u8_t reason); void *cb_data; u8_t program_pin; u16_t group_init_status[6]; u32_t pwm_ir_sw; u32_t buf_num; u32_t pwm_ir_mode; struct k_timer timer; u32_t ir_event_timeout; u32_t pwm_ir_lc[2]; u32_t pwm_ir_ll[2]; u32_t pwm_ir_ld[2]; u32_t pwm_ir_pl[2]; u32_t pwm_ir_pd0[2]; u32_t pwm_ir_pd1[2]; u32_t pwm_ir_sl[2]; u8_t ir_pout; bool manual_stop_flag; }; struct pwm_acts_config { u32_t base; u32_t pwmclk_reg; u32_t cycle; u8_t clock_id; u8_t reset_id; const struct acts_pin_config *pinmux; u8_t pinmux_size; void (*irq_config_func)(void); const char *dma_dev_name; u8_t txdma_id; u8_t flag_use_dma; }; void pwm_acts_repeat_event_process(const struct pwm_acts_config *cfg, u32_t pending) { uint32_t pwm_base; if(pending & PWM_PENDING_G0REPEAT) { sys_write32(~(PWM_PENDING_G0REPEAT) & sys_read32(PWM_INT_CTL(cfg->base)), PWM_INT_CTL(cfg->base)); pwm_base = PWM0_BASE(cfg->base); struct acts_pwm_groupx *pwm = (struct acts_pwm_groupx *)pwm_base; pwm->ctrl = pwm->ctrl & ~(PWMx_CTRL_CHx_MODE_SEL(0,0xfff)); } if(pending & PWM_PENDING_G1REPEAT) { sys_write32(~(PWM_PENDING_G1REPEAT) & sys_read32(PWM_INT_CTL(cfg->base)), PWM_INT_CTL(cfg->base)); pwm_base = PWM1_BASE(cfg->base); struct acts_pwm_groupx *pwm = (struct acts_pwm_groupx *)pwm_base; pwm->ctrl = pwm->ctrl & ~(PWMx_CTRL_CHx_MODE_SEL(0,0xfff)); } if(pending & PWM_PENDING_G2REPEAT) { sys_write32(~(PWM_PENDING_G2REPEAT) & sys_read32(PWM_INT_CTL(cfg->base)), PWM_INT_CTL(cfg->base)); pwm_base = PWM2_BASE(cfg->base); struct acts_pwm_groupx *pwm = (struct acts_pwm_groupx *)pwm_base; pwm->ctrl = pwm->ctrl & ~(PWMx_CTRL_CHx_MODE_SEL(0,0x3)); } if(pending & PWM_PENDING_G3REPEAT) { sys_write32(~(PWM_PENDING_G3REPEAT) & 
sys_read32(PWM_INT_CTL(cfg->base)), PWM_INT_CTL(cfg->base)); pwm_base = PWM3_BASE(cfg->base); struct acts_pwm_groupx *pwm = (struct acts_pwm_groupx *)pwm_base; pwm->ctrl = pwm->ctrl & ~(PWMx_CTRL_CHx_MODE_SEL(0,0x3)); } if(pending & PWM_PENDING_G4REPEAT) { sys_write32(~(PWM_PENDING_G4REPEAT) & sys_read32(PWM_INT_CTL(cfg->base)), PWM_INT_CTL(cfg->base)); pwm_base = PWM4_BASE(cfg->base); struct acts_pwm_groupx *pwm = (struct acts_pwm_groupx *)pwm_base; pwm->ctrl = pwm->ctrl & ~(PWMx_CTRL_CHx_MODE_SEL(0,0x3)); } } static void pwm_acts_ir_timeout_event(struct k_timer *timer) { struct pwm_acts_data *data = k_timer_user_data_get(timer); struct acts_pwm_ir *pwm_ir = (struct acts_pwm_ir *)PWM_IR(PWM_REG_BASE); pwm_ir->ir_ll = data->pwm_ir_ll[data->pwm_ir_sw]; pwm_ir->ir_ld = data->pwm_ir_ld[data->pwm_ir_sw]; pwm_ir->ir_pd0 = data->pwm_ir_pd0[data->pwm_ir_sw]; pwm_ir->ir_pd1 = data->pwm_ir_pd1[data->pwm_ir_sw]; pwm_ir->ir_sl = data->pwm_ir_sl[data->pwm_ir_sw]; pwm_ir->ir_pl = data->pwm_ir_pl[data->pwm_ir_sw]; pwm_ir->ir_lc = data->pwm_ir_lc[data->pwm_ir_sw]; pwm_ir->ir_ctl |= PWM_IRCTL_CU; if(data->pwm_ir_sw < data->buf_num) data->pwm_ir_sw++; if(data->pwm_ir_sw >= data->buf_num) { if(data->pwm_ir_mode & PWM_IR_CYCLE_MODE) data->pwm_ir_sw = 0; else data->pwm_ir_sw = data->buf_num -1; } k_timer_stop(&data->timer); } void pwm_acts_isr(void *arg) { struct device *dev = (struct device *)arg; struct pwm_acts_data *data = dev->data; const struct pwm_acts_config *cfg = dev->config; struct acts_pwm_ir *pwm_ir = (struct acts_pwm_ir *)PWM_IR(cfg->base); unsigned int key; key = irq_lock(); if((sys_read32(PWM_PENDING(cfg->base)) & PWM_PENDING_IRSS) && (data->buf_num > 1)) { pwm_ir->ir_ctl |= PWM_IRCTL_CU; u16_t timeout; timeout = (data->ir_event_timeout * pwm_ir->ir_ll)/1000 + PWM_IR_TIMEOUT; k_timer_start(&data->timer, K_MSEC(timeout), K_MSEC(timeout)); } irq_unlock(key); if(sys_read32(PWM_PENDING(cfg->base)) & PWM_PENDING_IRAE) { if (data->manual_stop_flag) { k_sem_give(&data->ir_sync); data->group_init_status[PWM_GROUP5_REG] = 0; } else { /* continue to send repeat code or data code */ pwm_ir->ir_ctl |= PWM_IRCTL_START; } } if(sys_read32(PWM_PENDING(cfg->base)) & (PWM_PENDING_REPEAT_MASK & sys_read32(PWM_INT_CTL(cfg->base)))) { pwm_acts_repeat_event_process(cfg, sys_read32(PWM_PENDING(cfg->base))); } sys_write32(0xffffffff, PWM_PENDING(cfg->base)); } static void pwm_acts_set_clk(const struct pwm_acts_config *cfg, uint32_t group, uint32_t freq_hz) { clk_set_rate(cfg->clock_id + group, freq_hz); k_busy_wait(100); } static u32_t pwm_acts_get_group(u32_t pwm) { u32_t group; if(pwm > 15) return -EINVAL; if((pwm) < 6) group = PWM_GROUP0_REG; else if((pwm) < 12) group = PWM_GROUP1_REG; else group = pwm -10; return group; } static u32_t pwm_acts_get_reg_base(u32_t base, uint32_t REG) { u32_t controller_reg; switch(REG) { case PWM_GROUP0_REG: controller_reg = PWM0_BASE(base); break; case PWM_GROUP1_REG: controller_reg = PWM1_BASE(base); break; case PWM_GROUP2_REG: controller_reg = PWM2_BASE(base); break; case PWM_GROUP3_REG: controller_reg = PWM3_BASE(base); break; case PWM_GROUP4_REG: controller_reg = PWM4_BASE(base); break; case PWM_GROUP5_REG: controller_reg = PWM5_BASE(base); break; case PWM_FIFO_REG: controller_reg = PWM_FIFO(base); break; case PWM_IR_REG: controller_reg = PWM_IR(base); break; case PWM_INTCTL_REG: controller_reg = PWM_INT_CTL(base); break; case PWM_PENDING_REG: controller_reg = PWM_PENDING(base); break; default: return -EINVAL; } return controller_reg; } /* * Set the period and pulse width for 
a PWM pin. * * Parameters * dev: Pointer to PWM device structure * pwm: PWM channel to set * period_cycles: Period (in timer count) * pulse_cycles: Pulse width (in timer count). * @param flags Flags for pin configuration (polarity). * return 0, or negative errno code */ static void pwm_acts_groupx_fix_init(u32_t base, u32_t period_cycles, u32_t pulse_cycles, u8_t chan, u8_t function, pwm_flags_t flags) { struct acts_pwm_groupx *pwm = (struct acts_pwm_groupx *)base; u32_t pol_param; pwm->ctrl |= PWMx_CTRL_CHx_MODE_SEL(chan,1); if(chan < 4) { pol_param = PWMx_CH_CTL0_CHx_POL_SEL(chan); if(flags) pwm->ch_ctrl0 |= pol_param; else pwm->ch_ctrl0 = pwm->ch_ctrl0 & (~pol_param); } else { pol_param = PWMx_CH_CTL1_CHx_POL_SEL(chan); if(flags) pwm->ch_ctrl1 |= pol_param; else pwm->ch_ctrl1 = pwm->ch_ctrl1 & (~pol_param); } if(function) { pwm->cntmax = period_cycles; pwm->cmp[chan] = pulse_cycles; if((pwm->ctrl & PWMx_CTRL_HUA) == 0) pwm->ctrl |= PWMx_CTRL_HUA; else { k_usleep(30); pwm->ctrl |= PWMx_CTRL_HUA; } return; } pwm->cntmax = period_cycles; pwm->cmp[chan] = pulse_cycles; pwm->ctrl |= PWMx_CTRL_CNT_EN;//norlmal mode } static int pwm_acts_pin_set(const struct device *dev, uint32_t pwm, u32_t period_cycles, u32_t pulse_cycles, pwm_flags_t flags) { const struct pwm_acts_config *cfg = dev->config; struct pwm_acts_data *data = dev->data; uint32_t base,group; u16_t status; LOG_INF("PWM@%d set period cycles %d ms, pulse cycles %d ms", pwm, period_cycles, pulse_cycles); // period_cycles = period_cycles * pwm_normal_clk_rate / 1000; // pulse_cycles = pulse_cycles * pwm_normal_clk_rate / 1000; if (pulse_cycles > period_cycles) { LOG_ERR("pulse cycles %d is biger than period&#39;s %d", pulse_cycles, period_cycles); return -EINVAL; } group = pwm_acts_get_group(pwm); status = data->group_init_status[group]; if((status&PWM_MODE_MASK) != PWM_DEFAULT_REG && (status&PWM_MODE_MASK) != PWM_FIX_INIT) { LOG_ERR("start a fix mode but have not stop this group bfore!"); return -EINVAL; } pwm = (pwm > 5)?pwm-6:pwm; pwm = (pwm > 5)?pwm-4-group:pwm; base = pwm_acts_get_reg_base(cfg->base, group); if((status & PWM_chan(pwm)) == 0)//this group_chan have not initialized yet. pwm_acts_groupx_fix_init(base,period_cycles,pulse_cycles,pwm, 0, flags); else pwm_acts_groupx_fix_init(base,period_cycles,pulse_cycles,pwm, 1, flags); if(pulse_cycles == 0) { data->group_init_status[group] = data->group_init_status[group] & ~(PWM_chan_act(pwm)); } else { data->group_init_status[group] = PWM_FIX_INIT | PWM_chan(pwm) | status; data->group_init_status[group] |= PWM_chan_act(pwm); } return 0; } /* * Set the period and pulse width for a PWM pin. 
* * Parameters * dev: Pointer to PWM device structure * pwm: PWM channel to set * ctrl: breath mode control * return 0, or negative errno code */ #ifdef CONFIG_PWM_TAI_FULL_FUNC static void pwm_acts_groupx_breath_init(u32_t base, u8_t chan, pwm_breath_ctrl_t *ctrl) { struct acts_pwm_groupx *pwm = (struct acts_pwm_groupx *)base; struct acts_pwm_breath_mode *breath = (struct acts_pwm_breath_mode *)(PWM_BREATH(base) + chan * PWM_BREATH_REG_SIZE); pwm->cntmax = ctrl->pwm_count_max; if(ctrl->stage_a_step) breath->pwm_bth_a = PWMx_BTHxy_EN | PWMx_BTHxy_XS(ctrl->stage_a_pwm) | PWMx_BTHxy_REPEAT(ctrl->stage_a_repeat) | PWMx_BTHxy_STEP(ctrl->stage_a_step); else breath->pwm_bth_a &= (~PWMx_BTHxy_EN); if(ctrl->stage_b_step) breath->pwm_bth_b = PWMx_BTHxy_EN | PWMx_BTHxy_XS(ctrl->stage_b_pwm) | PWMx_BTHxy_REPEAT(ctrl->stage_b_repeat) | PWMx_BTHxy_STEP(ctrl->stage_b_step); else breath->pwm_bth_b &= (~PWMx_BTHxy_EN); if(ctrl->stage_c_step) breath->pwm_bth_c = PWMx_BTHxy_EN | PWMx_BTHxy_XS(ctrl->stage_c_pwm) | PWMx_BTHxy_REPEAT(ctrl->stage_c_repeat) | PWMx_BTHxy_STEP(ctrl->stage_c_step); else breath->pwm_bth_c &= (~PWMx_BTHxy_EN); if(ctrl->stage_d_step) breath->pwm_bth_d = PWMx_BTHxy_EN | PWMx_BTHxy_XS(ctrl->stage_d_pwm) | PWMx_BTHxy_REPEAT(ctrl->stage_d_repeat) | PWMx_BTHxy_STEP(ctrl->stage_d_step); else breath->pwm_bth_d &= (~PWMx_BTHxy_EN); if(ctrl->stage_e_step) breath->pwm_bth_e = PWMx_BTHxy_EN | PWMx_BTHxy_XS(ctrl->stage_e_pwm) | PWMx_BTHxy_REPEAT(ctrl->stage_e_repeat) | PWMx_BTHxy_STEP(ctrl->stage_e_step); else breath->pwm_bth_e &= (~PWMx_BTHxy_EN); if(ctrl->stage_f_step) breath->pwm_bth_f = PWMx_BTHxy_EN | PWMx_BTHxy_XS(ctrl->stage_f_pwm) | PWMx_BTHxy_REPEAT(ctrl->stage_f_repeat) | PWMx_BTHxy_STEP(ctrl->stage_f_step); else breath->pwm_bth_f &= (~PWMx_BTHxy_EN); if(ctrl->stage_low_wait) breath->pwm_bth_hl = PWMx_BTHx_HL_L(ctrl->stage_low_wait) | PWMx_BTHx_HL_LEN; else breath->pwm_bth_hl &= (~PWMx_BTHx_HL_LEN); if(ctrl->stage_high_wait) breath->pwm_bth_hl = PWMx_BTHx_HL_H(ctrl->stage_high_wait) | PWMx_BTHx_HL_HEN; else breath->pwm_bth_hl &= (~PWMx_BTHx_HL_HEN); if(ctrl->start_dir) breath->pwm_bth_st = PWMx_BTHx_ST_ST(ctrl->start_pwm) | PWMx_BTHx_ST_DIR; else breath->pwm_bth_st = PWMx_BTHx_ST_ST(ctrl->start_pwm); pwm->ctrl |= PWMx_CTRL_CHx_MODE_SEL(chan,3) | PWMx_CTRL_CNT_EN;//breath mode } static int pwm_acts_set_breath_mode(const struct device *dev, uint32_t pwm, pwm_breath_ctrl_t *ctrl) { const struct pwm_acts_config *cfg = dev->config; struct pwm_acts_data *data = dev->data; uint32_t base,group; u16_t status; group = pwm_acts_get_group(pwm); status = data->group_init_status[group]; if((status&0x3) != PWM_BTH_INIT && (status&0x3)) {//not brearh mode or default LOG_ERR("start a breath mode but have not stop this group bfore!"); return -EINVAL; } pwm = (pwm > 5)?pwm-6:pwm; pwm = (pwm > 5)?pwm-4-group:pwm; base = pwm_acts_get_reg_base(cfg->base, group); if(group == PWM_GROUP1_REG || group == PWM_GROUP2_REG) pwm_acts_groupx_breath_init(base, pwm, ctrl); else { LOG_ERR("unsuported channel: %d",pwm); return -EINVAL; } data->group_init_status[group] = PWM_BTH_INIT | PWM_chan(pwm) | status; return 0; } #endif /* * Set the period and pulse width for a PWM pin. 
* * Parameters * dev: Pointer to PWM device structure * pwm: PWM channel to set * ctrl: program mode control * return 0, or negative errno code */ #ifdef CONFIG_PWM_TAI_FULL_FUNC static void dma_done_callback(const struct device *dev, void *callback_data, uint32_t ch , int type) { struct pwm_acts_data *data = (struct pwm_acts_data *)callback_data; //if (type != DMA_IRQ_TC) //return; LOG_DBG("pwm dma transfer is done"); k_sem_give(&data->dma_sync); } static int pwm_acts_start_dma(const struct pwm_acts_config *cfg, struct pwm_acts_data *data, uint32_t dma_chan, uint16_t *buf, int32_t len, bool is_tx, void *callback) { struct acts_pwm_fifo *pwm_fifo = (struct acts_pwm_fifo *)PWM_FIFO(cfg->base); struct dma_config dma_cfg = {0}; struct dma_block_config dma_block_cfg = {0}; if (callback) { dma_cfg.dma_callback = (dma_callback_t)callback; dma_cfg.user_data = data; dma_cfg.complete_callback_en = 1; } dma_cfg.block_count = 1; dma_cfg.head_block = &dma_block_cfg; dma_block_cfg.block_size = len; if (is_tx) { dma_cfg.dma_slot = cfg->txdma_id; dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL; dma_block_cfg.source_address = (uint32_t)buf; dma_block_cfg.dest_address = (uint32_t)&pwm_fifo->fifodat; dma_cfg.dest_data_size = 2; } else { return -EINVAL; } dma_cfg.source_burst_length = 4; if (dma_config(data->dma_dev, dma_chan, &dma_cfg)) { LOG_ERR("dma%d config error", dma_chan); return -1; } if (dma_start(data->dma_dev, dma_chan)) { LOG_ERR("dma%d start error", dma_chan); return -1; } return 0; } static void pwm_acts_stop_dma(const struct pwm_acts_config *cfg, struct pwm_acts_data *data, uint32_t dma_chan) { dma_stop(data->dma_dev, dma_chan); } static int pwm_acts_wait_fifo_staus(struct acts_pwm_fifo *pwm_fifo, uint16_t timeout) { int start_time; start_time = k_uptime_get_32(); while(!(pwm_fifo->fifosta&PWM_FIFOSTA_EMPTY)) { if(k_uptime_get_32() - start_time > 500) return 0; } return -EINVAL; } static int pwm_acts_dma_transfer(const struct pwm_acts_config *cfg, struct pwm_acts_data *data, u8_t chan, pwm_program_ctrl_t *ctrl) { struct acts_pwm_groupx *pwm = (struct acts_pwm_groupx *)cfg->base; struct acts_pwm_fifo *pwm_fifo = (struct acts_pwm_fifo *)PWM_FIFO(cfg->base); int ret; pwm->ctrl |= PWMx_CTRL_CHx_MODE_SEL(chan,2) | PWMx_CTRL_CM | PWMx_CTRL_RM; if(ctrl->cntmax) { pwm->ctrl = pwm->ctrl & ~(PWMx_CTRL_CM); pwm->cntmax = ctrl->cntmax; } if(ctrl->repeat) { pwm->ctrl = pwm->ctrl & ~(PWMx_CTRL_RM); pwm->repeat = ctrl->repeat; } if(pwm_acts_wait_fifo_staus(pwm_fifo, 500)) { LOG_ERR("time out error, pwm fifo can not be empty!"); return -EINVAL; } pwm_fifo->fifoctl = PWM_FIFOCTL_START; ret = pwm_acts_start_dma(cfg, data, data->dma_chan, ctrl->ram_buf, ctrl->ram_buf_len, 1, dma_done_callback); if (ret) { LOG_ERR("faield to start dma chan 0x%x\n", data->dma_chan); goto out; } pwm->ctrl |= PWMx_CTRL_CNT_EN; /* wait until dma transfer is done */ k_sem_take(&data->dma_sync, K_FOREVER);//K_MSEC(500));// out: pwm_acts_stop_dma(cfg, data, data->dma_chan); if(pwm_acts_wait_fifo_staus(pwm_fifo, 500)) { LOG_ERR("time out error, pwm fifo can not be empty!"); return -EINVAL; } pwm->ctrl = pwm->ctrl & ~(PWMx_CTRL_CHx_MODE_SEL(chan,2) | PWMx_CTRL_CM | PWMx_CTRL_RM | PWMx_CTRL_CNT_EN); pwm_fifo->fifoctl = pwm_fifo->fifoctl & ~(PWM_FIFOCTL_START); return ret; } static int pwm_acts_set_program_mode(const struct device *dev, uint32_t pwm, pwm_program_ctrl_t *ctrl) { const struct pwm_acts_config *cfg = dev->config; struct pwm_acts_data *data = dev->data; uint32_t base,group; u16_t status; group = pwm_acts_get_group(pwm); status 
= data->group_init_status[group]; if((status&0x3) != PWM_PRG_INIT && (status&0x3)) {//not program mode or default LOG_ERR("start a breath mode but have not stop this group bfore!"); return -EINVAL; } pwm = (pwm > 5)?pwm-6:pwm; pwm = (pwm > 5)?pwm-4-group:pwm; base = pwm_acts_get_reg_base(cfg->base, group); if(group == PWM_GROUP0_REG || group == PWM_GROUP1_REG || group == PWM_GROUP2_REG) { pwm_acts_dma_transfer(cfg, data, pwm, ctrl);//这里有问题的需要修改,ctrl无法覆盖到group2和group1 } else { LOG_ERR("unsuported channel: %d",pwm); return -EINVAL; } // data->group_init_status[group] = PWM_BTH_INIT | PWM_chan(pwm) | status; return 0; } #endif //#define TX_ANA_CTL(base) (base + 0xf4) //#define IR_TX_DINV (1 << 8) //#define IR_TX_SR(X) (X << 4) //#define IR_TX_POUT(X) (X << 1) //#define IR_TX_EN (1) #if 0 static void pwm_acts_ir_reg_dump(struct acts_pwm_ir *pwm_ir) { printk("pwm_ir->ir_asc :%d\n", pwm_ir->ir_asc ); printk("pwm_ir->ir_duty :%d\n", pwm_ir->ir_duty ); printk("pwm_ir->ir_lc :%d\n", pwm_ir->ir_lc ); printk("pwm_ir->ir_ld :%d\n", pwm_ir->ir_ld ); printk("pwm_ir->ir_ll :%d\n", pwm_ir->ir_ll ); printk("pwm_ir->ir_pd0 :%d\n", pwm_ir->ir_pd0 ); printk("pwm_ir->ir_pd1 :%d\n", pwm_ir->ir_pd1 ); printk("pwm_ir->ir_period :%d\n", pwm_ir->ir_period ); printk("pwm_ir->ir_pl :%d\n", pwm_ir->ir_pl ); printk("pwm_ir->ir_pl0_post :%d\n", pwm_ir->ir_pl0_post ); printk("pwm_ir->ir_pl0_pre :%d\n", pwm_ir->ir_pl0_pre ); printk("pwm_ir->ir_pl1_post :%d\n", pwm_ir->ir_pl1_post ); printk("pwm_ir->ir_pl1_pre :%d\n", pwm_ir->ir_pl1_pre ); printk("pwm_ir->ir_sl :%d\n", pwm_ir->ir_sl ); } #endif static void pwm_acts_ir_tx(const struct pwm_acts_config *cfg, struct pwm_acts_data *data, u8_t chan, struct pwm_ir_mode_param_t *ctrl, uint32_t base) { struct acts_pwm_groupx *pwm = (struct acts_pwm_groupx *)base; struct acts_pwm_ir *pwm_ir = (struct acts_pwm_ir *)PWM_IR(cfg->base); u16_t mode; acts_pinmux_set(cfg->pinmux[cfg->pinmux_size - 1].pin_num, cfg->pinmux[cfg->pinmux_size - 1].mode); #if TX_ANA_EN sys_write32(0, RX_ANA_CTL(GPIO_REG_BASE)); sys_write32(IR_TX_EN | IR_TX_POUT(data->ir_pout), TX_ANA_CTL(GPIO_REG_BASE)); #endif while(pwm_ir->ir_ctl&PWM_IRCTL_START); data->pwm_ir_mode = ctrl[0].mode; mode = ctrl[0].mode & PWM_IR_MASK; pwm->ctrl |= PWMx_CTRL_CHx_MODE_SEL(chan,2); pwm_ir->ir_asc = ctrl[0].ir_asc; pwm_ir->ir_duty = ctrl[0].ir_duty; pwm_ir->ir_lc = ctrl[0].ir_lc; pwm_ir->ir_ld = ctrl[0].ir_ld; pwm_ir->ir_ll = ctrl[0].ir_ll; pwm_ir->ir_pd0 = ctrl[0].ir_pd0; pwm_ir->ir_pd1 = ctrl[0].ir_pd1; pwm_ir->ir_period = ctrl[0].ir_period; pwm_ir->ir_pl = ctrl[0].ir_pl; pwm_ir->ir_pl0_post = ctrl[0].ir_pl0_post; pwm_ir->ir_pl0_pre = ctrl[0].ir_pl0_pre; pwm_ir->ir_pl1_post = ctrl[0].ir_pl1_post; pwm_ir->ir_pl1_pre = ctrl[0].ir_pl1_pre; pwm_ir->ir_sl = ctrl[0].ir_sl; // pwm_acts_ir_reg_dump(pwm_ir); if(ctrl[0].buf_num > 1) { data->pwm_ir_sw = 1; data->pwm_ir_lc[0] = ctrl[0].ir_lc; data->pwm_ir_ld[0] = ctrl[0].ir_ld; data->pwm_ir_ll[0] = ctrl[0].ir_ll; data->pwm_ir_pd0[0] = ctrl[0].ir_pd0; data->pwm_ir_pd1[0] = ctrl[0].ir_pd1; data->pwm_ir_pl[0] = ctrl[0].ir_pl; data->pwm_ir_sl[0] = ctrl[0].ir_sl; data->pwm_ir_lc[1] = ctrl[1].ir_lc; data->pwm_ir_ld[1] = ctrl[1].ir_ld; data->pwm_ir_ll[1] = ctrl[1].ir_ll; data->pwm_ir_pd0[1] = ctrl[1].ir_pd0; data->pwm_ir_pd1[1] = ctrl[1].ir_pd1; data->pwm_ir_pl[1] = ctrl[1].ir_pl; data->pwm_ir_sl[1] = ctrl[1].ir_sl; } data->manual_stop_flag = false; data->buf_num = ctrl[0].buf_num; sys_write32(0xffffffff, PWM_PENDING(cfg->base)); sys_write32(PWM_INTCTL_IRAE | PWM_INTCTL_IRSS, PWM_INT_CTL(cfg->base)); 
pwm_ir->ir_ctl = PWM_IRCTL_PLED | PWM_IRCTL_START; pwm->ctrl |= PWMx_CTRL_CNT_EN; } static u32_t pwm_acts_data_cal(u32_t data, u32_t buf_num) { u32_t cal_val = 0; u32_t cal_dat = data; while(buf_num > 0) { if(cal_dat & 0x1) cal_val++; buf_num--; cal_dat = cal_dat >> 1; } return cal_val; } static u32_t pwm_acts_ir_get_len(u32_t ld) { u32_t len = 0; for(int i = 0; i < 32; i++) { if(ld & 0x80000000) break; len++; ld = ld << 1; } len = 32 - len; return len; } static void pwm_acts_ir_param_sw(struct ir_tx_data_param *ctrl, struct pwm_ir_mode_param_t *param, u32_t loc, struct ir_tx_protocol_param *protocol_param) { u32_t val, sym, ld_len, protocol; u16_t cr_rate; protocol = ctrl->mode; if(ctrl->rate) cr_rate = ctrl->rate/100; else cr_rate = protocol_param->ir_cr_rate; param->ir_period = pwm_clk_rate/(cr_rate * 1000); if(ctrl->duty) param->ir_duty = param->ir_period *10/ctrl->duty; else param->ir_duty = param->ir_period/3; param->ir_lc = cr_rate * protocol_param->ir_lc_bit_length/1000; sym = ir_code_pre_sym(protocol_param->ir_0_code) << 16; val = ir_code_pre_val(protocol_param->ir_0_code) * protocol_param->code_bit_length; val = val * cr_rate/1000; if(sym) val = val + 1;//process remainder param->ir_pl0_pre = sym | val; sym = ir_code_pos_sym(protocol_param->ir_0_code) << 16; val = ir_code_pos_val(protocol_param->ir_0_code) * protocol_param->code_bit_length; val = val * cr_rate/1000; if(sym) val = val + 1;//process remainder param->ir_pl0_post = sym | val; sym = ir_code_pre_sym(protocol_param->ir_1_code) << 16; val = ir_code_pre_val(protocol_param->ir_1_code) * protocol_param->code_bit_length; val = val * cr_rate/1000; if(sym) val = val + 1;//process remainder param->ir_pl1_pre = sym | val; sym = ir_code_pos_sym(protocol_param->ir_1_code) << 16; val = ir_code_pos_val(protocol_param->ir_1_code) * protocol_param->code_bit_length; val = val * cr_rate/1000; if(sym) val = val + 1;//process remainder param->ir_pl1_post = sym | val; param->ir_ll = pwm_acts_ir_get_len(protocol_param->ir_lc_code); param->ir_ld = protocol_param->ir_lc_code;//01b if(protocol & PWM_IR_CYCLE_MODE) { ld_len = pwm_acts_ir_get_len(protocol_param->ir_trc_loc); param->ir_pl = (loc == 1)?(protocol_param->ir_dc_length - ld_len):(ld_len -1); param->ir_asc = protocol_param->ir_asc; param->ir_Tf = (loc == 1)? 
0 : (protocol_param->ir_Tf_length + PWM_IR_TX_MARGIN) * 10; val = ir_code_pos_val(protocol_param->ir_0_code) * protocol_param->code_bit_length; val = val * cr_rate/1000; if(protocol_param->ir_stop_bit && loc != 1) param->ir_lc = param->ir_lc - val; } else { param->ir_pl = protocol_param->ir_dc_length; param->ir_asc = protocol_param->ir_asc; param->ir_Tf = (protocol_param->ir_Tf_length + PWM_IR_TX_MARGIN) * 10; } param->ir_stop_bit = protocol_param->ir_stop_bit; } static void pwm_acts_ir_param_cal(struct ir_tx_data_param *ctrl, struct pwm_ir_mode_param_t *result, struct ir_tx_protocol_param *protocol_param) { u32_t num_0, num_1, sum_cycle, ir_sl, pwm_rate, mode, ld_len; u32_t data[2] = {0}; struct pwm_ir_mode_param_t param; mode = ctrl->mode & PWM_IR_MASK; pwm_acts_ir_param_sw(ctrl, &param , 1, protocol_param); pwm_rate = pwm_clk_rate/1000/param.ir_period; result[0].ir_period = param.ir_period; result[0].ir_duty = param.ir_duty; result[0].ir_lc = param.ir_lc; result[0].ir_pl0_pre = param.ir_pl0_pre; result[0].ir_pl0_post = param.ir_pl0_post; result[0].ir_pl1_pre = param.ir_pl1_pre; result[0].ir_pl1_post = param.ir_pl1_post; result[0].ir_ll = param.ir_ll; result[0].ir_ld = param.ir_ld;//01b if(param.ir_stop_bit) { param.ir_pl = param.ir_pl + 1;//patload 24bit+endflag ctrl->data[1] = (ctrl->data[1] << 1) | (ctrl->data[0] >> 31); ctrl->data[0] = ctrl->data[0] << 1; } ld_len = pwm_acts_ir_get_len(protocol_param->ir_trc_loc); data[0] = ctrl->data[0] >> ld_len; data[1] = ctrl->data[1] >> ld_len; result[0].ir_pl = param.ir_pl; if(param.ir_pl > 32) { result[0].ir_pd0 = data[0]; num_1 = pwm_acts_data_cal(data[0], 32); result[0].ir_pd1 = data[1]; num_1 += pwm_acts_data_cal(data[0], result[0].ir_pl - 32); } else { result[0].ir_pd0 = data[0]; num_1 = pwm_acts_data_cal(data[0], result[0].ir_pl); } num_0 = result[0].ir_pl - num_1; sum_cycle = result[0].ir_lc*result[0].ir_ll; sum_cycle += num_0*((result[0].ir_pl0_pre&0xff) + (result[0].ir_pl0_post&0xff)); sum_cycle += num_1*((result[0].ir_pl1_pre&0xff) + (result[0].ir_pl1_post&0xff)); if(param.ir_Tf) { ir_sl = (param.ir_Tf/1000 - (sum_cycle/pwm_rate)); ir_sl = ir_sl * pwm_rate / result[0].ir_lc; result[0].ir_sl = ir_sl; } else result[0].ir_sl = 0; if(ctrl->buf_num > 1 && (ctrl->mode & PWM_IR_CYCLE_MODE)) result[0].ir_asc = param.ir_asc * 2; else if(ctrl->buf_num > 1) result[0].ir_asc = param.ir_asc + 1; else result[0].ir_asc = param.ir_asc; if(ctrl->buf_num > 1) { if(ctrl->mode & PWM_IR_CYCLE_MODE) pwm_acts_ir_param_sw(ctrl, &param, 2, protocol_param); else pwm_acts_ir_param_sw(ctrl, &param, 2, protocol_param + 1); result[1].ir_lc = param.ir_lc; if(ctrl->lead == NULL) { result[1].ir_ld = param.ir_ld; result[1].ir_ll = param.ir_ll; } else { result[1].ir_ld = ctrl->lead->ld; result[1].ir_ll = ctrl->lead->ll; } if(param.ir_stop_bit) param.ir_pl = param.ir_pl + 1;//patload 24bit+endflag result[1].ir_pl = param.ir_pl; data[0] = data[1] = 0; ld_len = (1 << ld_len) - 1; data[0] = ctrl->data[0] & ld_len; data[1] = ctrl->data[1] & ld_len; if(param.ir_pl > 32) { result[1].ir_pd0 = data[0]; num_1 = pwm_acts_data_cal(data[0], 32); result[1].ir_pd1 = data[1]; num_1 += pwm_acts_data_cal(data[0], result[1].ir_pl - 32); } else { result[1].ir_pd0 = data[0]; num_1 = pwm_acts_data_cal(data[0], result[1].ir_pl); } result[1].ir_sl = 0; num_0 = result[1].ir_pl - num_1; if(ctrl->mode & PWM_IR_CYCLE_MODE) sum_cycle += param.ir_lc * result[1].ir_ll; else sum_cycle = param.ir_lc * result[1].ir_ll; sum_cycle += num_0*((param.ir_pl0_pre&0xff) + (param.ir_pl0_post&0xff)); sum_cycle 
    += num_1 * ((param.ir_pl1_pre & 0xff) + (param.ir_pl1_post & 0xff));
        if (param.ir_Tf) {
            ir_sl = (param.ir_Tf / 1000 - (sum_cycle / pwm_rate));
            ir_sl = ir_sl * pwm_rate / param.ir_lc;
            result[1].ir_sl = ir_sl;
        }
    }
    result[0].buf_num = ctrl->buf_num;
    result[0].mode = ctrl->mode;
}

static int pwm_acts_ir_transfer(const struct device *dev, u32_t pwm, pwm_ir_ctrl_t *ctrl)
{
    const struct pwm_acts_config *cfg = dev->config;
    struct pwm_acts_data *data = dev->data;
    uint32_t base, group;
    u16_t status;
    struct pwm_ir_mode_param_t result[2] = {0};
    struct ir_tx_protocol_param *ir_protocol = ctrl->protocol;
    struct ir_tx_data_param *ir_data = ctrl->data;

    k_sem_take(&data->ir_transfer_sync, K_FOREVER);

    data->ir_event_timeout = ir_protocol->ir_lc_bit_length;
    pwm_acts_ir_param_cal(ir_data, result, ir_protocol);

    group = pwm_acts_get_group(pwm);
    status = data->group_init_status[group];
    if ((status & PWM_MODE_MASK) != PWM_IR_INIT &&
        (status & PWM_MODE_MASK) != PWM_DEFAULT_REG) { /* not IR mode or default */
        LOG_ERR("starting IR mode, but this group was not stopped before!");
        return -EINVAL;
    }

    pwm = (pwm > 5) ? pwm - 6 : pwm;
    pwm = (pwm > 5) ? pwm - 4 - group : pwm;
    base = pwm_acts_get_reg_base(cfg->base, group);

    if (group == PWM_GROUP5_REG) { /* only group 5 supports IR */
        pwm_acts_ir_tx(cfg, data, pwm, result, base);
    } else {
        LOG_ERR("unsupported channel: %d", pwm);
        return -EINVAL;
    }

    data->group_init_status[group] = PWM_IR_INIT | PWM_chan(pwm) | status;

    return 0;
}

static int pwm_acts_ir_stop_transfer(const struct device *dev)
{
    const struct pwm_acts_config *cfg = dev->config;
    struct pwm_acts_data *data = dev->data;
    struct acts_pwm_ir *pwm_ir = (struct acts_pwm_ir *)PWM_IR(cfg->base);

    if (pwm_ir->ir_ctl & PWM_IRCTL_START) {
        data->manual_stop_flag = true;
        pwm_ir->ir_ctl |= PWM_IRCTL_STOP;
    }

    k_sem_take(&data->ir_sync, K_FOREVER);
    k_sem_give(&data->ir_transfer_sync);

    return 0;
}

#ifdef CONFIG_PWM_TAI_FULL_FUNC
static int pwm_acts_pin_stop(const struct device *dev, uint32_t pwm)
{
    return 0;
}
#endif

static int pwm_acts_pin_repeat(const struct device *dev, uint32_t pwm, uint8_t repeat)
{
    const struct pwm_acts_config *cfg = dev->config;
    struct pwm_acts_data *data = dev->data;
    uint32_t base, group;
    u16_t status;
    unsigned int key;

    if (repeat == 0)
        return -EINVAL;

    key = irq_lock();

    group = pwm_acts_get_group(pwm);
    status = data->group_init_status[group];
    pwm = (pwm > 5) ? pwm - 6 : pwm;
    pwm = (pwm > 5) ? pwm - 4 - group : pwm;
    base = pwm_acts_get_reg_base(cfg->base, group);

    switch (group) {
    case PWM_GROUP0_REG:
        sys_write32(PWM_INTCTL_G0REPEAT, PWM_INT_CTL(cfg->base));
        break;
    case PWM_GROUP1_REG:
        sys_write32(PWM_INTCTL_G1REPEAT, PWM_INT_CTL(cfg->base));
        break;
    case PWM_GROUP2_REG:
        sys_write32(PWM_INTCTL_G2C0 | PWM_INTCTL_G2REPEAT, PWM_INT_CTL(cfg->base));
        break;
    case PWM_GROUP3_REG:
        sys_write32(PWM_INTCTL_G3C0 | PWM_INTCTL_G3REPEAT, PWM_INT_CTL(cfg->base));
        break;
    case PWM_GROUP4_REG:
        sys_write32(PWM_INTCTL_G4C0 | PWM_INTCTL_G4REPEAT, PWM_INT_CTL(cfg->base));
        break;
    case PWM_GROUP5_REG:
        sys_write32(PWM_INTCTL_G5C0 | PWM_INTCTL_G5REPEAT, PWM_INT_CTL(cfg->base));
        break;
    default:
        irq_unlock(key); /* fix: do not leave IRQs locked on the error path */
        return -EINVAL;
    }

    struct acts_pwm_groupx *pwm_reg = (struct acts_pwm_groupx *)base;

    pwm_reg->repeat = repeat;
    pwm_reg->ctrl |= PWMx_CTRL_HUA;
    // k_sleep(K_USEC(32));
    // pwm_reg->ctrl |= PWMx_CTRL_CU;
    sys_write32(0xffffffff, PWM_PENDING(cfg->base));

    irq_unlock(key);

    return 0;
}

/*
 * Get the clock rate (cycles per second) for a PWM pin.
 *
 * Parameters
 *  dev:    Pointer to PWM device structure
 *  pwm:    PWM port number
 *  cycles: Pointer to the memory to store clock rate (cycles per second)
 *
 * return 0, or negative errno code
 */
#ifdef CONFIG_PWM_TAI_FULL_FUNC
static int pwm_acts_get_cycles_per_sec(const struct device *dev, uint32_t pwm, u64_t *cycles)
{
    const struct pwm_acts_config *cfg = dev->config;
    struct pwm_acts_data *data = dev->data;
    u32_t group;

    group = pwm_acts_get_group(pwm);
    if (group == PWM_GROUP5_REG)
        *cycles = pwm_clk_rate;
    else
        *cycles = pwm_normal_clk_rate;

    return 0;
}
#endif

static int pwm_acts_reset_peripheral(int reset_id)
{
    sys_write32((sys_read32(RMU_MRCR0) & ~(0x3f << reset_id)), RMU_MRCR0);
    sys_write32(((0x3f << reset_id) | sys_read32(RMU_MRCR0)), RMU_MRCR0);

    return 0;
}

static int pwm_acts_ir_tx_power(const struct device *dev, uint8_t level)
{
    struct pwm_acts_data *data = dev->data;

    if (level > 7) {
        LOG_ERR("error param level:%d", level);
        return -1;
    }
    data->ir_pout = level;

    return 0;
}

int pwm_acts_init(const struct device *dev)
{
    const struct pwm_acts_config *cfg = dev->config;
    struct pwm_acts_data *data = dev->data;

    acts_pinmux_setup_pins(cfg->pinmux, cfg->pinmux_size - 1);

    /* reset pwm controller */
    pwm_acts_reset_peripheral(cfg->reset_id);

    cfg->irq_config_func();

    for (int i = 0; i < 5; i++) {
        /* enable pwm controller clock */
        acts_clock_peripheral_enable(cfg->clock_id + i);
        /* 32kHz */
        pwm_acts_set_clk(cfg, i, pwm_normal_clk_rate);
    }
    acts_clock_peripheral_enable(cfg->clock_id + 5);
    pwm_acts_set_clk(cfg, 5, pwm_clk_rate); //pwm_normal_clk_rate

    memset(data->group_init_status, 0, 6);

    k_sem_init(&data->ir_sync, 0, 1);
    k_sem_init(&data->ir_transfer_sync, 0, 1);
    k_sem_give(&data->ir_transfer_sync);

    if (cfg->dma_dev_name != NULL) {
        data->dma_dev = (struct device *)device_get_binding(cfg->dma_dev_name);
        if (!data->dma_dev) {
            LOG_ERR("Bind DMA device %s error", cfg->dma_dev_name);
            return -ENOENT;
        }
        k_sem_init(&data->dma_sync, 0, 1);

        data->dma_chan = dma_request(data->dma_dev, 0xff);
        if (data->dma_chan < 0) {
            LOG_ERR("dma-dev rxchan config err chan=%d\n", data->dma_chan);
            return -ENODEV;
        }
    }

    data->ir_pout = 0;
    data->manual_stop_flag = false;

    k_timer_init(&data->timer, pwm_acts_ir_timeout_event, NULL);
    k_timer_user_data_set(&data->timer, (void *)data);

    return 0;
}

const struct pwm_driver_api pwm_acts_driver_api = {
    .pin_set = pwm_acts_pin_set,
    .ir_transfer = pwm_acts_ir_transfer,
    .ir_stop_transfer = pwm_acts_ir_stop_transfer,
    .pin_repeat = pwm_acts_pin_repeat,
    .ir_tx_power_set = pwm_acts_ir_tx_power,
#ifdef CONFIG_PWM_TAI_FULL_FUNC
    .get_cycles_per_sec = pwm_acts_get_cycles_per_sec,
    .set_breath = pwm_acts_set_breath_mode,
    .set_program = pwm_acts_set_program_mode,
    .pin_stop = pwm_acts_pin_stop,
#endif
};

static struct pwm_acts_data pwm_acts_data;

static const struct acts_pin_config pins_pwm[] = {CONFIG_PWM_MFP};

static void pwm_acts_irq_config(void);

static const struct pwm_acts_config pwm_acts_config = {
    .base = PWM_REG_BASE,
    .cycle = CONFIG_PWM_CYCLE,
    .clock_id = CLOCK_ID_PWM0,
    .reset_id = RESET_ID_PWM0,
    .dma_dev_name = CONFIG_DMA_0_NAME,
    .txdma_id = CONFIG_PWM_DMA_ID,
    .pinmux = pins_pwm,
    .irq_config_func = pwm_acts_irq_config,
    .pinmux_size = ARRAY_SIZE(pins_pwm),
};

#if CONFIG_PWM
static void pwm_acts_pinmux_setup_pins(const struct acts_pin_config *pinconf, int pins, u8_t flag)
{
    int i;

    for (i = 0; i < pins; i++) {
        if (flag)
            acts_pinmux_set(pinconf[i].pin_num, pinconf[i].mode);
        else
            acts_pinmux_set(pinconf[i].pin_num, 0x1000);
    }
}

static int pwm_acts_active(const struct device *dev)
{
    const struct pwm_acts_config *cfg = dev->config;

    pwm_acts_pinmux_setup_pins(cfg->pinmux, cfg->pinmux_size - 1, 1);
    pwm_acts_reset_peripheral(cfg->reset_id);
    cfg->irq_config_func();

    for (int i = 0; i < 5; i++) {
        acts_clock_peripheral_enable(cfg->clock_id + i);
        pwm_acts_set_clk(cfg, i, pwm_normal_clk_rate);
    }
    acts_clock_peripheral_enable(cfg->clock_id + 5);
    pwm_acts_set_clk(cfg, 5, pwm_clk_rate); //pwm_normal_clk_rate

    return 0;
}

static int pwm_acts_suspend(const struct device *dev)
{
    struct pwm_acts_data *data = dev->data;
    const struct pwm_acts_config *cfg = dev->config;
    u16_t status;

    for (int i = 0; i < PWM_GROUP_MAX; i++) {
        status = data->group_init_status[i] & PWM_MODE_MASK;
        if ((status != PWM_DEFAULT_REG) && (status != PWM_FIX_INIT)) {
            return -ESRCH;
        }
        status = data->group_init_status[i];
        if ((PWM_FIX_INIT == (status & PWM_MODE_MASK)) &&
            ((status & PWM_chan_act_MASK) != 0)) {
            return -ESRCH;
        }
    }

    pwm_acts_pinmux_setup_pins(cfg->pinmux, cfg->pinmux_size, 0);
    memset(data->group_init_status, 0, 6);
    sys_write32((sys_read32(RMU_MRCR0) & ~(0x3f << cfg->reset_id)), RMU_MRCR0);
    for (int i = 0; i < PWM_GROUP_MAX; i++)
        acts_clock_peripheral_disable(cfg->clock_id + i);

    return 0;
}

static int pwm_acts_pm_control(const struct device *dev, uint32_t command,
                               void *context, device_pm_cb cb, void *arg)
{
    int ret = 0;
    uint32_t state = *((uint32_t *)context);
    static u8_t sleep_status = 1;

    LOG_DBG("command:0x%x state:%d", command, state);

    if (command != DEVICE_PM_SET_POWER_STATE)
        return 0;

    switch (state) {
    case DEVICE_PM_ACTIVE_STATE:
        if (sleep_status == 0)
            pwm_acts_active(dev);
        sleep_status = 1;
        break;
    case DEVICE_PM_SUSPEND_STATE:
        ret = pwm_acts_suspend(dev);
        if (ret == 0)
            sleep_status = 0;
        break;
    case DEVICE_PM_EARLY_SUSPEND_STATE:
    case DEVICE_PM_LATE_RESUME_STATE:
    case DEVICE_PM_LOW_POWER_STATE:
    case DEVICE_PM_OFF_STATE:
        break;
    default:
        ret = -ESRCH;
    }

    return ret;
}

DEVICE_DEFINE(pwm_acts, CONFIG_PWM_NAME, pwm_acts_init, pwm_acts_pm_control,
              &pwm_acts_data, &pwm_acts_config,
              POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
              &pwm_acts_driver_api);

static void pwm_acts_irq_config(void)
{
    IRQ_CONNECT(IRQ_ID_PWM, CONFIG_PWM_IRQ_PRI,
                pwm_acts_isr, DEVICE_GET(pwm_acts), 0);
    irq_enable(IRQ_ID_PWM);

    //IRQ_CONNECT(IRQ_ID_KEY_WAKEUP, 1,
    //            mxkeypad_acts_wakeup_isr, DEVICE_GET(mxkeypad_acts), 0);
}
#endif

Please remember the code above.
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2021, Intel Corporation.

#include <linux/bitfield.h>
#include <linux/crc8.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
#include <uapi/linux/aspeed-espi-mmbi.h>
#include <dt-bindings/mmbi/protocols.h>

#define DEVICE_NAME "mmbi"

#define MAX_NO_OF_SUPPORTED_CHANNELS 8
#define MAX_NO_OF_SUPPORTED_PROTOCOLS 5

/* 20 bits for H2B/B2H write/read pointers */
#define H2B_WRITE_POINTER_MASK GENMASK(19, 0)
#define B2H_READ_POINTER_MASK GENMASK(19, 0)
#define MMBI_HDR_LENGTH_MASK GENMASK(23, 0)
#define MMBI_HDR_TYPE_MASK GENMASK(31, 24)

#define HOST_RESET_REQUEST_BIT BIT(31)
#define HOST_READY_BIT BIT(31)
#define ESPI_SCI_STATUS_BIT BIT(24)

#define GET_H2B_WRITE_POINTER(x) ((x) & H2B_WRITE_POINTER_MASK)
#define GET_B2H_READ_POINTER(x) ((x) & B2H_READ_POINTER_MASK)
#define GET_HOST_RESET_REQ_BIT(x) ((x) & HOST_RESET_REQUEST_BIT)
#define GET_HOST_READY_BIT(x) ((x) & HOST_READY_BIT)
#define HOST_READ_SCI_STATUS_BIT(x) ((x) & ESPI_SCI_STATUS_BIT)

#define MMBI_CRC8_POLYNOMIAL 0x07
DECLARE_CRC8_TABLE(mmbi_crc8_table);

typedef u8 protocol_type;

struct host_rop {
    unsigned int b2h_wp : 20;   /* Offset where BMC can write next data in B2H */
    unsigned int reserved1 : 11;
    unsigned int b_rdy : 1;     /* BMC ready bit */
    unsigned int h2b_rp : 20;   /* Offset till where BMC read data in H2B */
    unsigned int reserved2 : 11;
    unsigned int b_rst : 1;     /* BMC reset request bit */
};

struct host_rwp {
    unsigned int h2b_wp : 20;   /* Offset where Host can write next data in H2B */
    unsigned int reserved1 : 11;
    unsigned int h_rdy : 1;     /* Host ready bit */
    unsigned int b2h_rp : 20;   /* Offset till where Host read data in B2H */
    unsigned int reserved2 : 11;
    unsigned int h_rst : 1;     /* Host reset request bit */
};

struct buffer_type_desc {
    u32 host_rop_p;
    u32 host_rwp_p;
    u8 msg_protocol_type;
    u8 host_int_type;
    u16 global_sys_interrupt;
    u8 bmc_int_type;
    u32 bmc_int_a;
    u8 bmc_int_v;
} __packed;

struct mmbi_cap_desc {
    u8 signature[6];
    u8 version;
    u8 instance_num;
    u32 nex_inst_base_addr;
    u32 b2h_ba;         /* B2H buffer base offset (i.e. 0x48) */
    u32 h2b_ba;         /* H2B buffer base offset (i.e. 0x08) */
    u16 b2h_d;          /* Multiple of 16 bytes (max 1MB) */
    u16 h2b_d;          /* Multiple of 16 bytes (max 1MB) */
    u8 buffer_type;     /* Type of buffer in B2H/H2B */
    u8 reserved1[7];
    struct buffer_type_desc bt_desc; /* 18 bytes */
    u8 reserved2[13];
    u8 crc8;            /* CRC-8-CCITT of the whole data structure (bytes 0 to 62) */
} __packed;

struct mmbi_header {
    u32 data;
};

struct aspeed_mmbi_protocol {
    struct miscdevice miscdev;
    struct aspeed_mmbi_channel *chan_ref;
    protocol_type type;
    bool data_available;
    /*
     * If a user space application has opened the device for read, then
     * process the data and copy it to user space. Otherwise, discard the
     * command and process the remaining commands (which can be of a
     * different protocol type).
     */
    bool process_data;
    wait_queue_head_t queue;
};

struct aspeed_mmbi_channel {
    struct aspeed_mmbi_protocol protocol[MAX_NO_OF_SUPPORTED_PROTOCOLS];
    struct aspeed_espi_mmbi *priv;
    u8 chan_num;
    u8 supported_protocols[MAX_NO_OF_SUPPORTED_PROTOCOLS];
    u32 b2h_cb_size;
    u32 h2b_cb_size;
    u8 *desc_vmem;
    u8 *hrop_vmem;
    u8 *b2h_cb_vmem;
    u8 *hrwp_vmem;
    u8 *h2b_cb_vmem;
    bool enabled;
};

struct aspeed_espi_mmbi {
    struct regmap *map;
    struct regmap *pmap;
    struct regmap *lpc_map;
    struct device *dev;
    int irq;
    phys_addr_t host_map_addr;
    dma_addr_t mmbi_phys_addr;
    resource_size_t mmbi_size;
    u8 *dma_vaddr;
    struct aspeed_mmbi_channel chan[MAX_NO_OF_SUPPORTED_CHANNELS];
};

static const struct regmap_config aspeed_espi_mmbi_regmap_cfg = {
    .reg_bits = 32,
    .reg_stride = 4,
    .val_bits = 32,
    .max_register = 0x04C,
};

static void raise_sci_interrupt(struct aspeed_mmbi_channel *channel)
{
    u32 val;
    int retry;
    struct regmap *lpc_regmap = channel->priv->lpc_map;

    dev_dbg(channel->priv->dev, "Raising SCI interrupt...\n");
    regmap_write_bits(lpc_regmap, AST_LPC_ACPIB7B4,
                      LPC_BMC_TRIG_SCI_EVT_EN, LPC_BMC_TRIG_SCI_EVT_EN);
    regmap_write_bits(lpc_regmap, AST_LPC_SWCR0704,
                      LPC_BMC_TRIG_WAKEUP_EVT_EN, LPC_BMC_TRIG_WAKEUP_EVT_EN);
    regmap_write_bits(lpc_regmap, AST_LPC_SWCR0B08,
                      LPC_BMC_TRIG_WAKEUP_EVT, LPC_BMC_TRIG_WAKEUP_EVT);

    /*
     * Just asserting the SCI VW will trigger the SCI event continuously,
     * so the BMC must deassert the SCI VW to avoid it.
     * Reading ESPI098[24] confirms whether the Host read the data:
     * - 0 means the host read the data
     * - 1 means the host has not yet read the data, so retry with a 1us delay.
     */
    retry = 30;
    while (retry) {
        if (regmap_read(channel->priv->pmap, ASPEED_ESPI_SYS_EVENT, &val)) {
            dev_err(channel->priv->dev, "Unable to read ESPI098\n");
            break;
        }
        if (HOST_READ_SCI_STATUS_BIT(val) == 0)
            break;
        retry--;
        dev_dbg(channel->priv->dev,
                "Host SCI handler not invoked(ESPI098: 0x%0x), so retry(%d) after 1us...\n",
                val, retry);
        udelay(1);
    }

    regmap_write_bits(lpc_regmap, AST_LPC_SWCR0300,
                      LPC_BMC_TRIG_WAKEUP_EVT_STS, LPC_BMC_TRIG_WAKEUP_EVT_STS);
    regmap_write_bits(lpc_regmap, AST_LPC_ACPIB3B0,
                      LPC_BMC_TRIG_SCI_EVT_STS, LPC_BMC_TRIG_SCI_EVT_STS);
}

static int read_host_rwp_val(struct aspeed_mmbi_channel *channel, u32 reg, u32 *val)
{
    int rc;

    rc = regmap_read(channel->priv->map, reg, val);
    if (rc) {
        dev_err(channel->priv->dev, "Unable to read Host RWP pointer\n");
        return rc;
    }

    return 0;
}

static int get_b2h_avail_buf_len(struct aspeed_mmbi_channel *channel, ssize_t *avail_buf_len)
{
    struct host_rop hrop;
    u32 b2h_rp, h_rwp1;

    if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) {
        dev_err(channel->priv->dev, "Failed to read Host RWP\n");
        return -EAGAIN;
    }
    b2h_rp = GET_B2H_READ_POINTER(h_rwp1);
    dev_dbg(channel->priv->dev, "MMBI HRWP - b2h_rp: 0x%0x\n", b2h_rp);

    memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop));
    dev_dbg(channel->priv->dev, "HROP - b2h_wp: 0x%0x, h2b_rp: 0x%0x",
            hrop.b2h_wp, hrop.h2b_rp);

    if (hrop.b2h_wp >= b2h_rp)
        *avail_buf_len = channel->b2h_cb_size - hrop.b2h_wp + b2h_rp;
    else
        *avail_buf_len = b2h_rp - hrop.b2h_wp;

    return 0;
}

static int get_mmbi_header(struct aspeed_mmbi_channel *channel, u32 *data_length,
                           u8 *type, u32 *unread_data_len)
{
    u32 h2b_wp, b2h_rp, h_rwp0, h_rwp1;
    struct mmbi_header header;
    struct host_rop hrop;

    memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop));
    dev_dbg(channel->priv->dev, "MMBI HROP - b2h_wp: 0x%0x, h2b_rp: 0x%0x\n",
            hrop.b2h_wp, hrop.h2b_rp);

    if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP0_INSTANCE0, &h_rwp0)) {
        dev_err(channel->priv->dev, "Failed to read Host RWP\n");
        return -EAGAIN;
    }
    if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) {
        dev_err(channel->priv->dev, "Failed to read Host RWP\n");
        return -EAGAIN;
    }
    h2b_wp = GET_H2B_WRITE_POINTER(h_rwp0);
    b2h_rp = GET_B2H_READ_POINTER(h_rwp1);
    dev_dbg(channel->priv->dev, "MMBI HRWP - h2b_wp: 0x%0x, b2h_rp: 0x%0x\n",
            h2b_wp, b2h_rp);

    if (h2b_wp >= hrop.h2b_rp)
        *unread_data_len = h2b_wp - hrop.h2b_rp;
    else
        *unread_data_len = channel->h2b_cb_size - hrop.h2b_rp + h2b_wp;

    if (*unread_data_len < sizeof(struct mmbi_header)) {
        dev_dbg(channel->priv->dev, "No data to read(%d - %d)\n",
                h2b_wp, hrop.h2b_rp);
        return -EAGAIN;
    }

    dev_dbg(channel->priv->dev, "READ MMBI header from: 0x%0x\n",
            (u32)(channel->h2b_cb_vmem + hrop.h2b_rp));

    /* Extract the MMBI protocol header - protocol type and length */
    if ((hrop.h2b_rp + sizeof(header)) <= channel->h2b_cb_size) {
        memcpy(&header, channel->h2b_cb_vmem + hrop.h2b_rp, sizeof(header));
    } else {
        ssize_t chunk_len = channel->h2b_cb_size - hrop.h2b_rp;

        memcpy(&header, channel->h2b_cb_vmem + hrop.h2b_rp, chunk_len);
        memcpy(((u8 *)&header) + chunk_len, channel->h2b_cb_vmem,
               sizeof(header) - chunk_len);
    }

    *data_length = FIELD_GET(MMBI_HDR_LENGTH_MASK, header.data);
    *type = FIELD_GET(MMBI_HDR_TYPE_MASK, header.data);

    return 0;
}

static void raise_missing_sci(struct aspeed_mmbi_channel *channel)
{
    struct host_rop hrop;
    u32 h_rwp0, h_rwp1, b2h_rptr;

    /* Raise SCI only if Host is READY (h_rdy is 1). */
    if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP0_INSTANCE0, &h_rwp0)) {
        dev_err(channel->priv->dev, "Failed to read Host RWP\n");
        return;
    }
    if (!GET_HOST_READY_BIT(h_rwp0)) {
        /* Host is not ready, no point in raising the SCI */
        return;
    }

    memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop));
    if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) {
        dev_err(channel->priv->dev, "Failed to read Host RWP\n");
        return;
    }
    b2h_rptr = GET_B2H_READ_POINTER(h_rwp1);
    if (hrop.b2h_wp == b2h_rptr) {
        /*
         * Host has read all outstanding SCI data,
         * do not raise another SCI.
         */
        return;
    }
    dev_dbg(channel->priv->dev,
            "Host has not read the data yet, so raising SCI interrupt again...\n");
    raise_sci_interrupt(channel);
}

static void update_host_rop(struct aspeed_mmbi_channel *channel,
                            unsigned int w_len, unsigned int r_len)
{
    struct host_rop hrop;

    memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop));
    dev_dbg(channel->priv->dev, "MMBI HROP - b2h_wp: 0x%0x, h2b_rp: 0x%0x\n",
            hrop.b2h_wp, hrop.h2b_rp);

    /* Advance the B2H CB offset for the next write */
    if ((hrop.b2h_wp + w_len) <= channel->b2h_cb_size)
        hrop.b2h_wp += w_len;
    else
        hrop.b2h_wp = hrop.b2h_wp + w_len - channel->b2h_cb_size;

    /* Advance the H2B CB offset till where BMC read data */
    if ((hrop.h2b_rp + r_len) <= channel->h2b_cb_size)
        hrop.h2b_rp += r_len;
    else
        hrop.h2b_rp = hrop.h2b_rp + r_len - channel->h2b_cb_size;

    /*
     * Clear the BMC reset request state if it is set:
     * Set BMC reset request bit to 0
     * Set BMC ready bit to 1
     */
    if (hrop.b_rst) {
        dev_dbg(channel->priv->dev, "Clearing BMC reset request state\n");
        hrop.b_rst = 0;
        hrop.b_rdy = 1;
    }

    dev_dbg(channel->priv->dev, "Updating HROP - h2b_rp: 0x%0x, b2h_wp: 0x%0x\n",
            hrop.h2b_rp, hrop.b2h_wp);
    memcpy(channel->hrop_vmem, &hrop, sizeof(hrop));

    /*
     * Raise the SCI interrupt only if the B2H buffer was updated.
     * Don't raise SCI after the BMC reads the H2B buffer.
     */
    if (w_len != 0)
        raise_sci_interrupt(channel);
}

static int send_bmc_reset_request(struct aspeed_mmbi_channel *channel)
{
    struct host_rop hrop;

    memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop));
    /*
     * Send MMBI buffer reset request: first the BMC should clear its own
     * pointers, set the reset bit, and clear the BMC ready bit.
     * B2H write pointer - must be set to zero
     * H2B read pointer - must be set to zero
     * BMC ready bit - set to 0
     * BMC reset bit - set to 1
     */
    hrop.b2h_wp = 0;
    hrop.h2b_rp = 0;
    hrop.b_rdy = 0;
    hrop.b_rst = 1;
    dev_info(channel->priv->dev, "Send BMC reset request on MMBI channel(%d)\n",
             channel->chan_num);
    memcpy(channel->hrop_vmem, &hrop, sizeof(hrop));

    /* Raise SCI interrupt */
    raise_sci_interrupt(channel);

    return 0;
}

void check_host_reset_request(struct aspeed_mmbi_channel *channel)
{
    struct host_rop hrop;
    u32 h_rwp1;

    if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) {
        dev_err(channel->priv->dev, "Failed to read Host RWP\n");
        return;
    }

    /* If it is not a host reset request, just discard it */
    if (!GET_HOST_RESET_REQ_BIT(h_rwp1))
        return;

    /* Host requested an MMBI buffer reset */
    memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop));
    /*
     * When the host requests an MMBI buffer reset:
     * B2H write pointer - must be set to zero
     * H2B read pointer - must be set to zero
     * BMC ready bit - no change (set to 1)
     * BMC reset bit - no change (set to 0)
     */
    hrop.b2h_wp = 0;
    hrop.h2b_rp = 0;
    dev_info(channel->priv->dev, "Handle Host reset request on MMBI channel(%d)\n",
             channel->chan_num);
    memcpy(channel->hrop_vmem, &hrop, sizeof(hrop));
}

void wake_up_device(struct aspeed_mmbi_channel *channel)
{
    u32 req_data_len, unread_data_len;
    u8 type;
    int i;

    if (0 != get_mmbi_header(channel, &req_data_len, &type, &unread_data_len)) {
        /* Bail out as we can't read the header */
        return;
    }
    dev_dbg(channel->priv->dev, "%s: Length: 0x%0x, Protocol Type: %d\n",
            __func__, req_data_len, type);

    for (i = 0; channel->supported_protocols[i] != 0; i++) {
        if (type == channel->supported_protocols[i]) {
            /*
             * MMBI supports multiple protocols on each channel.
             * If a userspace application has not opened the device
             * to read/write the data, discard the data and
             * advance the HROP for processing the next command.
             */
            if (channel->protocol[i].process_data) {
                channel->protocol[i].data_available = true;
                wake_up(&channel->protocol[i].queue);
            } else {
                /* Discard data and advance the hrop */
                update_host_rop(channel, 0,
                                req_data_len + sizeof(struct mmbi_header));
            }
            /*
             * Raise the missing SCIs by checking the pointer for host
             * read acknowledgment. This works around the missing
             * SCI bug on the host side.
             */
            dev_warn(channel->priv->dev,
                     "%s: Check and raise missing SCI\n", __func__);
            raise_missing_sci(channel);
        }
    }
}

static struct aspeed_mmbi_protocol *file_aspeed_espi_mmbi(struct file *file)
{
    return container_of(file->private_data, struct aspeed_mmbi_protocol, miscdev);
}

static int mmbi_open(struct inode *inode, struct file *filp)
{
    return 0;
}

static int mmbi_release(struct inode *inode, struct file *filp)
{
    return 0;
}

static unsigned int mmbi_poll(struct file *filp, poll_table *wait)
{
    struct aspeed_mmbi_protocol *protocol = file_aspeed_espi_mmbi(filp);

    poll_wait(filp, &protocol->queue, wait);

    return protocol->data_available ? POLLIN : 0;
}

static ssize_t mmbi_read(struct file *filp, char *buff, size_t count, loff_t *offp)
{
    struct aspeed_mmbi_protocol *protocol = file_aspeed_espi_mmbi(filp);
    struct aspeed_mmbi_channel *channel = protocol->chan_ref;
    struct aspeed_espi_mmbi *priv = channel->priv;
    struct host_rop hrop;
    ssize_t rd_offset, rd_len;
    ssize_t ret;
    u32 unread_data_len, req_data_len;
    u8 type;

    protocol->process_data = true;

    if (!protocol->data_available && (filp->f_flags & O_NONBLOCK)) {
        /*
         * Workaround: the lack of response might be caused by a missing
         * SCI (host didn't consume the last message); check the buffer
         * state and retry if needed.
         */
        raise_missing_sci(channel);
        return -EAGAIN;
    }

    dev_dbg(priv->dev, "%s: count:%d, Type: %d\n", __func__, count, protocol->type);
    ret = wait_event_interruptible(protocol->queue, protocol->data_available);
    if (ret == -ERESTARTSYS) {
        ret = -EINTR;
        goto err_out;
    }

    ret = get_mmbi_header(channel, &req_data_len, &type, &unread_data_len);
    if (ret != 0) {
        /* Bail out as we can't read the header. */
        goto err_out;
    }
    dev_dbg(priv->dev, "%s: Length: 0x%0x, Protocol Type: %d, Unread data: %d\n",
            __func__, req_data_len, type, unread_data_len);

    if (req_data_len > count) {
        dev_err(priv->dev, "Data exceeding user space limit: %d\n", count);
        ret = -EFAULT;
        /* Discard data and advance the hrop */
        update_host_rop(channel, 0, req_data_len + sizeof(struct mmbi_header));
        goto err_out;
    }

    /*
     * Check whether the data belongs to this device; if not, wake up the
     * corresponding device.
     */
    if (type != protocol->type) {
        ret = -EFAULT;
        goto err_out;
    }

    memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop));
    if ((hrop.h2b_rp + sizeof(struct mmbi_header)) <= channel->h2b_cb_size)
        rd_offset = hrop.h2b_rp + sizeof(struct mmbi_header);
    else
        rd_offset = hrop.h2b_rp + sizeof(struct mmbi_header) - channel->h2b_cb_size;
    rd_len = req_data_len;

    /* Extract data and copy it to the user space application */
    dev_dbg(priv->dev, "READ MMBI Data from: 0x%0x and length: %d\n",
            (u32)(channel->h2b_cb_vmem + rd_offset), rd_len);

    if (unread_data_len < sizeof(struct mmbi_header) + rd_len) {
        dev_err(priv->dev, "Invalid H2B buffer (Read msg length: %d)\n", rd_len);
        ret = -EFAULT;
        goto err_out;
    }

    if ((channel->h2b_cb_size - rd_offset) >= rd_len) {
        if (copy_to_user(buff, channel->h2b_cb_vmem + rd_offset, rd_len)) {
            dev_err(priv->dev, "Failed to copy data to user space\n");
            ret = -EFAULT;
            goto err_out;
        }
        rd_offset += rd_len;
    } else {
        ssize_t chunk_len;

        chunk_len = channel->h2b_cb_size - rd_offset;
        if (copy_to_user(buff, channel->h2b_cb_vmem + rd_offset, chunk_len)) {
            dev_err(priv->dev, "Failed to copy data to user space\n");
            ret = -EFAULT;
            goto err_out;
        }
        rd_offset = 0;
        if (copy_to_user(buff + chunk_len, channel->h2b_cb_vmem + rd_offset,
                         rd_len - chunk_len)) {
            dev_err(priv->dev, "Failed to copy data to user space\n");
            ret = -EFAULT;
            goto err_out;
        }
        rd_offset += (rd_len - chunk_len);
    }

    *offp += rd_len;
    ret = rd_len;
    update_host_rop(channel, 0, rd_len + sizeof(struct mmbi_header));
    dev_dbg(priv->dev, "%s: Return length: %d\n", __func__, ret);

err_out:
    /*
     * Raise the missing SCIs by checking the pointer for host
     * read acknowledgment. This works around the missing
     * SCI bug on the host side.
     */
    dev_warn(priv->dev, "%s: Check and raise missing SCI\n", __func__);
    raise_missing_sci(channel);
    protocol->data_available = false;
    wake_up_device(channel);

    return ret;
}

static ssize_t mmbi_write(struct file *filp, const char *buffer, size_t len, loff_t *offp)
{
    struct aspeed_mmbi_protocol *protocol = file_aspeed_espi_mmbi(filp);
    struct aspeed_mmbi_channel *channel = protocol->chan_ref;
    struct aspeed_espi_mmbi *priv = channel->priv;
    struct mmbi_header header;
    struct host_rop hrop;
    ssize_t wt_offset;
    ssize_t avail_buf_len;
    ssize_t chunk_len;
    ssize_t end_offset;
    u32 h_rwp0;

    dev_dbg(priv->dev, "%s: length:%d , type: %d\n", __func__, len, protocol->type);

    if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP0_INSTANCE0, &h_rwp0)) {
        dev_err(priv->dev, "Failed to read Host RWP\n");
        return -EAGAIN;
    }

    /* If the Host READY bit is not set, just discard the write. */
    if (!GET_HOST_READY_BIT(h_rwp0)) {
        dev_dbg(channel->priv->dev, "Host not ready, discarding request...\n");
        return -EAGAIN;
    }

    if (get_b2h_avail_buf_len(channel, &avail_buf_len)) {
        dev_dbg(priv->dev, "Failed to get B2H empty buffer len\n");
        return -EAGAIN;
    }
    dev_dbg(priv->dev, "B2H buffer empty space: %d\n", avail_buf_len);

    /* Empty space should be more than the write request data size */
    if (avail_buf_len <= sizeof(header) ||
        (len > (avail_buf_len - sizeof(header)))) {
        dev_err(priv->dev, "Not enough space(%d) in B2H buffer\n", avail_buf_len);
        return -ENOSPC;
    }

    /* Fill the multi-protocol header */
    header.data = ((protocol->type << 24) + len);

    memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop));
    wt_offset = hrop.b2h_wp;
    end_offset = channel->b2h_cb_size;

    if ((end_offset - wt_offset) >= sizeof(header)) {
        memcpy(channel->b2h_cb_vmem + wt_offset, &header, sizeof(header));
        wt_offset += sizeof(header);
    } else {
        chunk_len = end_offset - wt_offset;
        memcpy(channel->b2h_cb_vmem + wt_offset, &header, chunk_len);
        /* fix: byte-wise pointer arithmetic when wrapping the header */
        memcpy(channel->b2h_cb_vmem, (u8 *)&header + chunk_len,
               (sizeof(header) - chunk_len));
        wt_offset = (sizeof(header) - chunk_len);
    }

    /* Write the data */
    if ((end_offset - wt_offset) >= len) {
        if (copy_from_user(&channel->b2h_cb_vmem[wt_offset], buffer, len))
            return -EFAULT;
        wt_offset += len;
    } else {
        chunk_len = end_offset - wt_offset;
        if (copy_from_user(&channel->b2h_cb_vmem[wt_offset], buffer, chunk_len))
            return -EFAULT;
        wt_offset = 0;
        if (copy_from_user(&channel->b2h_cb_vmem[wt_offset],
                           buffer + chunk_len, len - chunk_len))
            return -EFAULT;
        wt_offset += len - chunk_len;
    }

    *offp += len;
    update_host_rop(channel, len + sizeof(struct mmbi_header), 0);

    return len;
}

static int get_mmbi_config(struct aspeed_mmbi_channel *channel, void __user *userbuf)
{
    bool h_ready;
    struct host_rop hrop;
    struct aspeed_mmbi_get_config get_conf;
    u32 h2b_wptr, b2h_rptr, h_rwp0, h_rwp1;

    if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP0_INSTANCE0, &h_rwp0)) {
        dev_err(channel->priv->dev, "Failed to read Host RWP\n");
        return -EAGAIN;
    }
    if (read_host_rwp_val(channel, ASPEED_MMBI_HRWP1_INSTANCE0, &h_rwp1)) {
        dev_err(channel->priv->dev, "Failed to read Host RWP\n");
        return -EAGAIN;
    }
    h2b_wptr = GET_H2B_WRITE_POINTER(h_rwp0);
    b2h_rptr = GET_B2H_READ_POINTER(h_rwp1);
    h_ready = GET_HOST_READY_BIT(h_rwp0) ? true : false;

    memcpy(&hrop, channel->hrop_vmem, sizeof(struct host_rop));

    get_conf.h_rdy = h_ready;
    get_conf.h2b_wp = h2b_wptr;
    get_conf.b2h_rp = b2h_rptr;
    get_conf.h2b_rp = hrop.h2b_rp;
    get_conf.b2h_wp = hrop.b2h_wp;

    if (copy_to_user(userbuf, &get_conf, sizeof(get_conf))) {
        dev_err(channel->priv->dev, "copy to user failed\n");
        return -EFAULT;
    }

    return 0;
}

static int get_b2h_empty_space(struct aspeed_mmbi_channel *channel, void __user *userbuf)
{
    struct aspeed_mmbi_get_empty_space empty_space;
    ssize_t avail_buf_len;

    if (get_b2h_avail_buf_len(channel, &avail_buf_len)) {
        dev_dbg(channel->priv->dev, "Failed to get B2H empty buffer len\n");
        return -EAGAIN;
    }
    dev_dbg(channel->priv->dev, "B2H buffer empty space: %d\n", avail_buf_len);

    empty_space.length = avail_buf_len;
    if (copy_to_user(userbuf, &empty_space, sizeof(empty_space))) {
        dev_err(channel->priv->dev, "copy to user failed\n");
        return -EFAULT;
    }

    return 0;
}

static long mmbi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct aspeed_mmbi_protocol *protocol = file_aspeed_espi_mmbi(filp);
    struct aspeed_mmbi_channel *channel = protocol->chan_ref;
    void __user *userbuf = (void __user *)arg;
    int ret;

    switch (cmd) {
    case ASPEED_MMBI_CTRL_IOCTL_GET_B2H_EMPTY_SPACE:
        ret = get_b2h_empty_space(channel, userbuf);
        break;
    case ASPEED_MMBI_CTRL_IOCTL_SEND_RESET_REQUEST:
        ret = send_bmc_reset_request(channel);
        break;
    case ASPEED_MMBI_CTRL_IOCTL_GET_CONFIG:
        ret = get_mmbi_config(channel, userbuf);
        break;
    default:
        dev_err(channel->priv->dev, "Command not found\n");
        ret = -ENOTTY;
    }

    return ret;
}

static const struct file_operations aspeed_espi_mmbi_fops = {
    .owner = THIS_MODULE,
    .open = mmbi_open,
    .release = mmbi_release,
    .read = mmbi_read,
    .write = mmbi_write,
    .unlocked_ioctl = mmbi_ioctl,
    .poll = mmbi_poll
};

static char *get_protocol_suffix(protocol_type type)
{
    switch (type) {
    case MMBI_PROTOCOL_IPMI:
        return "ipmi";
    case MMBI_PROTOCOL_SEAMLESS:
        return "seamless";
    case MMBI_PROTOCOL_RAS_OFFLOAD:
        return "ras_offload";
    case MMBI_PROTOCOL_MCTP:
        return "mctp";
    case MMBI_PROTOCOL_NODE_MANAGER:
        return "nm";
    }

    return NULL;
}

static struct mmbi_cap_desc mmbi_desc_init(struct aspeed_mmbi_channel channel)
{
    struct mmbi_cap_desc ch_desc;

    memset(&ch_desc, 0, sizeof(ch_desc));
    /* Per the MMBI protocol spec, set it to "$MMBI$" */
    strncpy(ch_desc.signature, "$MMBI$", sizeof(ch_desc.signature));
    ch_desc.version = 1;
    ch_desc.instance_num = channel.chan_num;
    /*
     * TODO: Add multi-channel support. The H2B start offset is hardcoded
     * to 0x8000 as we support a single channel today.
     */
    ch_desc.nex_inst_base_addr = 0;
    ch_desc.b2h_ba = sizeof(struct mmbi_cap_desc) + sizeof(struct host_rop);
    ch_desc.h2b_ba = 0x8000 + sizeof(struct host_rwp);
    ch_desc.b2h_d = 0x800; /* 32KB = 0x800 * 16 */
    ch_desc.h2b_d = 0x800; /* 32KB = 0x800 * 16 */
    ch_desc.buffer_type = 0x01; /* VMSCB */
    ch_desc.bt_desc.host_rop_p = sizeof(struct mmbi_cap_desc);
    ch_desc.bt_desc.host_rwp_p = 0x8000;
    ch_desc.bt_desc.msg_protocol_type = 0x01;     /* Multiple protocol type */
    ch_desc.bt_desc.host_int_type = 0x01;         /* SCI triggered through eSPI VW */
    ch_desc.bt_desc.global_sys_interrupt = 0x00;  /* Not used */
    ch_desc.bt_desc.bmc_int_type = 0x00;          /* Auto - AST HW interrupt */
    ch_desc.bt_desc.bmc_int_a = 0x00;             /* Not used, set to zero */
    ch_desc.bt_desc.bmc_int_v = 0x00;             /* Not used, set to zero */
    ch_desc.crc8 = crc8(mmbi_crc8_table, (u8 *)&ch_desc,
                        (size_t)(sizeof(ch_desc) - 1), 0);

    return ch_desc;
}

static int mmbi_channel_init(struct aspeed_espi_mmbi *priv,
                             struct device_node *node, u8 idx)
{
    struct device *dev = priv->dev;
    int rc;
    u8 i;
    u8 *h2b_vaddr, *b2h_vaddr;
    struct mmbi_cap_desc ch_desc;
    struct host_rop hrop;
    int no_of_protocols_enabled;
    u8 mmbi_supported_protocols[MAX_NO_OF_SUPPORTED_PROTOCOLS];
    u32 b2h_size = (priv->mmbi_size / 2);
    u32 h2b_size = (priv->mmbi_size / 2);

    b2h_vaddr = priv->dma_vaddr;
    h2b_vaddr = priv->dma_vaddr + (priv->mmbi_size / 2);

    memset(&priv->chan[idx], 0, sizeof(struct aspeed_mmbi_channel));
    priv->chan[idx].chan_num = idx;
    priv->chan[idx].desc_vmem = b2h_vaddr;
    priv->chan[idx].hrop_vmem = b2h_vaddr + sizeof(struct mmbi_cap_desc);
    priv->chan[idx].b2h_cb_vmem = b2h_vaddr + sizeof(struct mmbi_cap_desc) +
                                  sizeof(struct host_rop);
    priv->chan[idx].b2h_cb_size = b2h_size - sizeof(struct mmbi_cap_desc) -
                                  sizeof(struct host_rop);

    /* Set BMC ready bit */
    memcpy(&hrop, priv->chan[idx].hrop_vmem, sizeof(hrop));
    hrop.b_rdy = 1;
    memcpy(priv->chan[idx].hrop_vmem, &hrop, sizeof(hrop));

    priv->chan[idx].hrwp_vmem = h2b_vaddr;
    priv->chan[idx].h2b_cb_vmem = h2b_vaddr + sizeof(struct host_rwp);
    priv->chan[idx].h2b_cb_size = h2b_size - sizeof(struct host_rwp);

    dev_dbg(priv->dev, "B2H mapped addr - desc: 0x%0x, hrop: 0x%0x, b2h_cb: 0x%0x\n",
            (size_t)priv->chan[idx].desc_vmem, (size_t)priv->chan[idx].hrop_vmem,
            (size_t)priv->chan[idx].b2h_cb_vmem);
    dev_dbg(priv->dev, "H2B mapped addr - hrwp: 0x%0x, h2b_cb: 0x%0x\n",
            (size_t)priv->chan[idx].hrwp_vmem, (size_t)priv->chan[idx].h2b_cb_vmem);
    dev_dbg(priv->dev, "B2H buffer size: 0x%0x\n", (size_t)priv->chan[idx].b2h_cb_size);
    dev_dbg(priv->dev, "H2B buffer size: 0x%0x\n", (size_t)priv->chan[idx].h2b_cb_size);

    /* Initialize the MMBI channel descriptor */
    ch_desc = mmbi_desc_init(priv->chan[idx]);
    memcpy(priv->chan[idx].desc_vmem, &ch_desc, sizeof(ch_desc));
    priv->chan[idx].enabled = true;

    if (!node) {
        dev_err(priv->dev, "mmbi protocol : no instance found\n");
        goto err_destroy_channel;
    }

    no_of_protocols_enabled = of_property_count_u8_elems(node, "protocols");
    if (no_of_protocols_enabled <= 0 ||
        no_of_protocols_enabled > MAX_NO_OF_SUPPORTED_PROTOCOLS) {
        dev_err(dev, "No supported mmbi protocol\n");
        goto err_destroy_channel;
    }

    rc = of_property_read_u8_array(node, "protocols", mmbi_supported_protocols,
                                   no_of_protocols_enabled);
    if (!rc) {
        memset(&priv->chan[idx].supported_protocols, 0,
               sizeof(priv->chan[idx].supported_protocols));
        memcpy(&priv->chan[idx].supported_protocols, mmbi_supported_protocols,
               sizeof(mmbi_supported_protocols));
    }

    for (i = 0; i < no_of_protocols_enabled; i++) {
        char *dev_name;
        u8 proto_type;

        proto_type = priv->chan[idx].supported_protocols[i];
        dev_name = get_protocol_suffix(proto_type);
        if (!dev_name) {
            dev_err(dev, "Unable to get MMBI protocol suffix name\n");
            goto err_destroy_channel;
        }

        priv->chan[idx].protocol[i].type = proto_type;
        priv->chan[idx].protocol[i].miscdev.name =
            devm_kasprintf(dev, GFP_KERNEL, "%s_%d_%s",
                           DEVICE_NAME, idx, dev_name);
        priv->chan[idx].protocol[i].miscdev.minor = MISC_DYNAMIC_MINOR;
        priv->chan[idx].protocol[i].miscdev.fops = &aspeed_espi_mmbi_fops;
        priv->chan[idx].protocol[i].miscdev.parent = dev;
        rc = misc_register(&priv->chan[idx].protocol[i].miscdev);
        if (rc) {
            dev_err(dev, "Unable to register device\n");
            goto err_destroy_channel;
        }

        /* Hold the back reference of the channel */
        priv->chan[idx].protocol[i].chan_ref = &priv->chan[idx];
        priv->chan[idx].protocol[i].data_available = false;
        priv->chan[idx].protocol[i].process_data = false;
        init_waitqueue_head(&priv->chan[idx].protocol[i].queue);
    }
    priv->chan[idx].priv = priv;

    /*
     * When the BMC goes for reset while the host is in the OS, SRAM memory
     * will be remapped and the content in memory will be lost. This includes
     * the host ready state, which will block memory write transactions.
     * Ideally this reset has to be done while mapping memory (u-boot).
     * Since channel initialization (including the descriptor) is done in the
     * kernel, a channel reset was also added during driver load. In the
     * future, when staged command processing (IPMI commands for BIOS-BMC
     * communication) is enabled, this check should move to u-boot.
     */
    if (send_bmc_reset_request(&priv->chan[idx]))
        dev_info(dev, "MMBI channel(%d) reset failed\n", idx);

    dev_info(dev, "MMBI Channel(%d) initialized successfully\n", idx);

    return 0;

err_destroy_channel:
    if (b2h_vaddr)
        memunmap(b2h_vaddr);
    if (h2b_vaddr)
        memunmap(h2b_vaddr);
    priv->chan[idx].enabled = false;

    return -ENOMEM;
}

static irqreturn_t aspeed_espi_mmbi_irq(int irq, void *arg)
{
    struct aspeed_espi_mmbi *priv = arg;
    u32 status;
    int idx;

    regmap_read(priv->map, ASPEED_MMBI_IRQ_STATUS, &status);
    /* Clear interrupt */
    regmap_write(priv->map, ASPEED_MMBI_IRQ_STATUS, status);

    for (idx = 0; idx < MAX_NO_OF_SUPPORTED_CHANNELS; idx++) {
        /*
         * Host RWP1: gets updated after the Host reads data and also
         * when the host wants to send a reset MMBI buffer request, so
         * handle the reset request and ignore the read pointer update.
         * Host RWP0: gets updated when the host writes data to H2B,
         * so process the request by invoking the corresponding device.
         */
        if (!priv->chan[idx].enabled)
            continue;

        if ((status >> (idx * 2)) & HRWP1_READ_MASK)
            check_host_reset_request(&priv->chan[idx]);
        else
            wake_up_device(&priv->chan[idx]);
    }
    dev_dbg(priv->dev, "MMBI IRQ Status: %d\n", status);

    return IRQ_HANDLED;
}

static const struct of_device_id aspeed_espi_mmbi_match[] = {
    { .compatible = "aspeed,ast2600-espi-mmbi" },
    {}
};
MODULE_DEVICE_TABLE(of, aspeed_espi_mmbi_match);

static int aspeed_espi_mmbi_probe(struct platform_device *pdev)
{
    const struct of_device_id *dev_id;
    struct aspeed_espi_mmbi *priv;
    struct device_node *node;
    struct resource resm;
    void __iomem *regs;
    u32 reg_val, enable_irqs = 0; /* fix: enable_irqs was used uninitialized */
    int rc, i;

    dev_dbg(&pdev->dev, "MMBI: Probing MMBI devices...\n");

    priv = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_espi_mmbi), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;
    priv->dev = &pdev->dev;

    dev_id = of_match_device(aspeed_espi_mmbi_match, priv->dev);
    if (!dev_id) {
        dev_err(priv->dev, "MMBI: Failed to match mmbi device\n");
        return -EINVAL;
    }

    regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(regs)) {
        dev_err(priv->dev, "MMBI: Failed to get regmap!\n");
        return PTR_ERR(regs);
    }

    /* MMBI register map */
    priv->map = devm_regmap_init_mmio(priv->dev, regs, &aspeed_espi_mmbi_regmap_cfg);
    if (IS_ERR(priv->map)) {
        dev_err(priv->dev, "MMBI: Couldn't get regmap\n");
        return -ENODEV;
    }

    /* eSPI register map */
    priv->pmap = syscon_regmap_lookup_by_phandle(priv->dev->of_node, "aspeed,espi");
    if (IS_ERR(priv->pmap)) {
        dev_err(priv->dev, "MMBI: Failed to find ESPI regmap\n");
        return PTR_ERR(priv->pmap);
    }

    /* LPC register map */
    priv->lpc_map = syscon_regmap_lookup_by_phandle(priv->dev->of_node, "aspeed,lpc");
    if (IS_ERR(priv->lpc_map)) {
        dev_err(priv->dev, "MMBI: Failed to find LPC regmap\n");
        return PTR_ERR(priv->lpc_map);
    }

    /* If memory-region is described in the device tree then store it */
    node = of_parse_phandle(priv->dev->of_node, "memory-region", 0);
    if (node) {
        rc = of_property_read_u32(priv->dev->of_node, "host-map-addr",
                                  &priv->host_map_addr);
        if (rc) {
            dev_info(priv->dev, "No host mapping address\n");
            priv->host_map_addr = PCH_ESPI_LGMR_BASE_ADDRESS;
        }

        rc = of_address_to_resource(node, 0, &resm);
        of_node_put(node);
        if (!rc) {
            priv->mmbi_size = resource_size(&resm);
            priv->mmbi_phys_addr = resm.start;
        } else {
            priv->mmbi_size = ESPI_MMBI_TOTAL_SIZE;
            priv->mmbi_phys_addr = BMC_SRAM_BASE_ADDRESS;
        }
    } else {
        dev_dbg(priv->dev, "No DTS config, assign default MMBI Address\n");
        priv->host_map_addr = PCH_ESPI_LGMR_BASE_ADDRESS;
        priv->mmbi_size = ESPI_MMBI_TOTAL_SIZE;
        priv->mmbi_phys_addr = BMC_SRAM_BASE_ADDRESS;
    }
    dev_dbg(priv->dev, "MMBI: HostAddr:0x%x, SramAddr:0x%x, Size: 0x%0x\n",
            priv->host_map_addr, priv->mmbi_phys_addr, priv->mmbi_size);

    priv->dma_vaddr = dma_alloc_coherent(priv->dev, priv->mmbi_size,
                                         &priv->mmbi_phys_addr, GFP_KERNEL);
    if (!priv->dma_vaddr) {
        dev_err(priv->dev, "MMBI: DMA memory allocation failed\n");
        return -ENOMEM;
    }
    dev_dbg(priv->dev, "MMBI: DMA Addr: 0x%x\n", (u32)priv->dma_vaddr);
    memset(priv->dma_vaddr, 0, priv->mmbi_size);

    crc8_populate_msb(mmbi_crc8_table, MMBI_CRC8_POLYNOMIAL);

    /* eSPI controller settings */
    regmap_write(priv->pmap, ASPEED_ESPI_PC_RX_SADDR, priv->host_map_addr);
    regmap_write(priv->pmap, ASPEED_ESPI_PC_RX_TADDR, priv->mmbi_phys_addr);
    regmap_write(priv->pmap, ASPEED_ESPI_PC_RX_TADDRM, ASPEED_ESPI_PC_RX_TADDR_MASK);
    regmap_update_bits(priv->pmap, ASPEED_ESPI_CTRL2,
                       ESPI_DISABLE_PERP_MEM_READ | ESPI_DISABLE_PERP_MEM_WRITE, 0);

    /* MMBI controller settings */
    regmap_read(priv->map, ASPEED_MMBI_CTRL, &reg_val);
    regmap_read(priv->map, ASPEED_MMBI_IRQ_ENABLE, &reg_val);
    regmap_write(priv->map, ASPEED_MMBI_CTRL,
                 MMBI_ENABLE_FUNCTION | MMBI_TOTAL_SIZE_64K | MMBI_INSTANCE_SIZE_64K);
    regmap_write(priv->map, ASPEED_MMBI_IRQ_ENABLE, 0x03);

    dev_set_drvdata(priv->dev, priv);

    for_each_child_of_node(priv->dev->of_node, node) {
        rc = of_property_read_u32(node, "channel", &i);
        if (rc || i >= MAX_NO_OF_SUPPORTED_CHANNELS || priv->chan[i].enabled)
            continue;

        rc = mmbi_channel_init(priv, node, i);
        if (rc)
            dev_err(priv->dev, "MMBI: Channel(%d) init failed\n", i);
        else
            enable_irqs += (0x03 << i);
    }
    regmap_write(priv->map, ASPEED_MMBI_IRQ_ENABLE, enable_irqs);

    /* Enable IRQ */
    priv->irq = platform_get_irq(pdev, 0);
    if (priv->irq < 0) {
        dev_err(priv->dev, "MMBI: No irq specified\n");
        return priv->irq;
    }

    rc = devm_request_irq(priv->dev, priv->irq, aspeed_espi_mmbi_irq,
                          IRQF_SHARED, dev_name(priv->dev), priv);
    if (rc) {
        dev_err(priv->dev, "MMBI: Unable to get IRQ\n");
        return rc;
    }

    dev_dbg(priv->dev, "MMBI: aspeed MMBI driver loaded successfully\n");

    return 0;
}

static int aspeed_espi_mmbi_remove(struct platform_device *pdev)
{
    struct aspeed_espi_mmbi *priv = dev_get_drvdata(&pdev->dev);
    int i, j;

    dev_dbg(priv->dev, "MMBI: Removing MMBI device\n");

    for (i = 0; i < MAX_NO_OF_SUPPORTED_CHANNELS; i++) {
        if (!priv->chan[i].enabled)
            continue;
        for (j = 0; priv->chan[i].supported_protocols[j] != 0; j++)
            misc_deregister(&priv->chan[i].protocol[j].miscdev);
    }

    if (priv->dma_vaddr)
        dma_free_coherent(priv->dev, priv->mmbi_size,
                          priv->dma_vaddr, priv->mmbi_phys_addr);

    return 0;
}

static struct platform_driver aspeed_espi_mmbi_driver = {
    .driver = {
        .name = DEVICE_NAME,
        .of_match_table = aspeed_espi_mmbi_match,
    },
    .probe = aspeed_espi_mmbi_probe,
    .remove = aspeed_espi_mmbi_remove,
};
module_platform_driver(aspeed_espi_mmbi_driver);

MODULE_AUTHOR("AppaRao Puli <apparao.puli@intel.com>");
MODULE_DESCRIPTION("MMBI Driver");
MODULE_LICENSE("GPL v2");

(This is my code. In probe it clobbers three registers: ASPEED_ESPI_PC_RX_SADDR, ASPEED_ESPI_PC_RX_TADDR, and ASPEED_ESPI_PC_RX_TADDRM.)
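Since the note above points out that probe() unconditionally overwrites those three eSPI peripheral-channel registers, one way to make that reversible is to snapshot them before the regmap_write() calls and restore them on remove. The sketch below is a minimal illustration, not part of the original driver: the espi_pc_saved_regs structure and both helpers are hypothetical, it assumes it sits in the same file as the driver (so priv->pmap and the register macros are in scope), and it uses only the standard Linux regmap_read()/regmap_write() API.

/*
 * Hypothetical helpers (not in the original driver): snapshot the three
 * eSPI peripheral-channel registers before probe() overwrites them, and
 * write the snapshot back on remove.
 */
struct espi_pc_saved_regs {
    u32 saddr;   /* ASPEED_ESPI_PC_RX_SADDR  */
    u32 taddr;   /* ASPEED_ESPI_PC_RX_TADDR  */
    u32 taddrm;  /* ASPEED_ESPI_PC_RX_TADDRM */
};

static int espi_pc_save_regs(struct aspeed_espi_mmbi *priv,
                             struct espi_pc_saved_regs *s)
{
    int rc;

    /* Read each register; bail out on the first regmap error */
    rc = regmap_read(priv->pmap, ASPEED_ESPI_PC_RX_SADDR, &s->saddr);
    if (rc)
        return rc;
    rc = regmap_read(priv->pmap, ASPEED_ESPI_PC_RX_TADDR, &s->taddr);
    if (rc)
        return rc;
    return regmap_read(priv->pmap, ASPEED_ESPI_PC_RX_TADDRM, &s->taddrm);
}

static void espi_pc_restore_regs(struct aspeed_espi_mmbi *priv,
                                 const struct espi_pc_saved_regs *s)
{
    /* Write the snapshot back, e.g. from aspeed_espi_mmbi_remove() */
    regmap_write(priv->pmap, ASPEED_ESPI_PC_RX_SADDR, s->saddr);
    regmap_write(priv->pmap, ASPEED_ESPI_PC_RX_TADDR, s->taddr);
    regmap_write(priv->pmap, ASPEED_ESPI_PC_RX_TADDRM, s->taddrm);
}

With this, espi_pc_save_regs() would be called in probe just before the three regmap_write() calls (keeping the snapshot in the driver's private data), and espi_pc_restore_regs() in remove, so unloading the module leaves the eSPI controller as it was found.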