__schedule分析

本文详细解析了Linux内核调度器的工作原理,包括__schedule函数的实现,任务状态转换,抢占机制,以及调度器如何响应不同类型的事件。特别关注了公平调度算法(fair scheduling algorithm)的细节,如任务选择、运行时统计更新、负载均衡和抢占(preemption)检查等关键操作。

先看__schedule实现 


/*
 * __schedule() is the main scheduler function.
 *
 * The main means of driving the scheduler and thus entering this function are:
 *
 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
 *
 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
 *      paths. For example, see arch/x86/entry_64.S.
 *
 *      To drive preemption between tasks, the scheduler sets the flag in timer
 *      interrupt handler scheduler_tick().
 *
 *   3. Wakeups don't really cause entry into schedule(). They add a
 *      task to the run-queue and that's it.
 *
 *      Now, if the new task added to the run-queue preempts the current
 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
 *      called on the nearest possible occasion:
 *
 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
 *
 *         - in syscall or exception context, at the next outmost
 *           preempt_enable(). (this might be as soon as the wake_up()'s
 *           spin_unlock()!)
 *
 *         - in IRQ context, return from interrupt-handler to
 *           preemptible context
 *
 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
 *         then at the next:
 *
 *          - cond_resched() call
 *          - explicit schedule() call
 *          - return from syscall or exception to user-space
 *          - return from interrupt-handler to user-space
 *
 * WARNING: must be called with preemption disabled!
 */
static void __sched notrace __schedule(bool preempt)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;
	u64 wallclock;

	/* Caller has preemption disabled, so this CPU/rq pairing is stable. */
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch();
	prev = rq->curr;

	/*
	 * do_exit() calls schedule() with preemption disabled as an exception;
	 * however we must fix that up, otherwise the next task will see an
	 * inconsistent (higher) preempt count.
	 *
	 * It also avoids the below schedule_debug() test from complaining
	 * about this.
	 */
	if (unlikely(prev->state == TASK_DEAD))
		preempt_enable_no_resched_notrace();

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	/*
	 * Make sure that signal_pending_state()->signal_pending() below
	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
	 * done by the caller to avoid the race with signal_wake_up().
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irq(&rq->lock);
	lockdep_pin_lock(&rq->lock);

	rq->clock_skip_update <<= 1; /* promote REQ to ACT */

	/* Default: account this as an involuntary context switch (nivcsw). */
	switch_count = &prev->nivcsw;
	if (!preempt && prev->state) {
		/* Not a preemption and prev is blocking (state != TASK_RUNNING). */
		if (unlikely(signal_pending_state(prev->state, prev))) {
			/* A pending signal aborts the sleep; keep prev runnable. */
			prev->state = TASK_RUNNING;
		} else {
				deactivate_task(rq, prev, DEQUEUE_SLEEP);
			prev->on_rq = 0;

			/*
			 * If a worker went to sleep, notify and ask workqueue
			 * whether it wants to wake up a task to maintain
			 * concurrency.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}
		}
		/* Voluntary switch: account under nvcsw instead. */
		switch_count = &prev->nvcsw;
	}

	if (task_on_rq_queued(prev))
		update_rq_clock(rq);

	/* Ask the scheduling classes for the highest-priority runnable task. */
	next = pick_next_task(rq, prev);
	/* WALT (window-assisted load tracking) accounting for prev and next. */
	wallclock = walt_ktime_clock();
	walt_update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
	walt_update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
	clear_tsk_need_resched(prev);
	clear_preempt_need_resched();
	rq->clock_skip_update = 0;

	if (likely(prev != next)) {
		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		trace_sched_switch(preempt, prev, next);
		rq = context_switch(rq, prev, next); /* unlocks the rq */
		cpu = cpu_of(rq);
	} else {
		/* Nothing better to run: drop the lock and keep running prev. */
		lockdep_unpin_lock(&rq->lock);
		raw_spin_unlock_irq(&rq->lock);
	}

	balance_callback(rq);
}
/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
	const struct sched_class *class = &fair_sched_class;
	struct task_struct *p;

	/*
	 * Optimization: we know that if all tasks are in
	 * the fair class we can call that function directly:
	 */
	if (likely(prev->sched_class == class &&
		   rq->nr_running == rq->cfs.h_nr_running)) {
		p = fair_sched_class.pick_next_task(rq, prev);
		/* RETRY_TASK means the class state changed; restart the walk. */
		if (unlikely(p == RETRY_TASK))
			goto again;

		/* assumes fair_sched_class->next == idle_sched_class */
		if (unlikely(!p))
			p = idle_sched_class.pick_next_task(rq, prev);

		return p;
	}

again:
	/* Slow path: walk the classes from highest to lowest priority. */
	for_each_class(class) {
		p = class->pick_next_task(rq, prev);
		if (p) {
			if (unlikely(p == RETRY_TASK))
				goto again;
			return p;
		}
	}

	BUG(); /* the idle class will always have a runnable task */
}
#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

const struct sched_class stop_sched_class = {
	.next			= &dl_sched_class,
}
const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
}

const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
}
/*
 * context_switch - switch to the new MM and the new thread's register state.
 *
 * NOTE(review): this listing takes a 4th parameter (struct rq_flags *rf),
 * but the __schedule() listing above calls context_switch(rq, prev, next)
 * with three arguments — the two snippets come from different kernel
 * versions; confirm against the intended source tree.
 */
static __always_inline struct rq *
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next, struct rq_flags *rf)
{
	prepare_task_switch(rq, prev, next);

	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	/*
	 * kernel -> kernel   lazy + transfer active
	 *   user -> kernel   lazy + mmgrab() active
	 *
	 * kernel ->   user   switch + mmdrop() active
	 *   user ->   user   switch
	 */
	if (!next->mm) {                                // to kernel
		enter_lazy_tlb(prev->active_mm, next);

		/* Kernel threads borrow the previous task's address space. */
		next->active_mm = prev->active_mm;
		if (prev->mm)                           // from user
			mmgrab(prev->active_mm);
		else
			prev->active_mm = NULL;
	} else {                                        // to user
		membarrier_switch_mm(rq, prev->active_mm, next->mm);
		/*
		 * sys_membarrier() requires an smp_mb() between setting
		 * rq->curr / membarrier_switch_mm() and returning to userspace.
		 *
		 * The below provides this either through switch_mm(), or in
		 * case 'prev->active_mm == next->mm' through
		 * finish_task_switch()'s mmdrop().
		 */
		switch_mm_irqs_off(prev->active_mm, next->mm, next);

		if (!prev->mm) {                        // from kernel
			/* will mmdrop() in finish_task_switch(). */
			rq->prev_mm = prev->active_mm;
			prev->active_mm = NULL;
		}
	}

	rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);

	prepare_lock_switch(rq, next, rf);

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);
	/* Execution resumes here when this task is scheduled back in. */
	barrier();

	return finish_task_switch(prev);
}

系统调用返回或从中断中返回用户空间会调用do_work_pending

asmlinkage __visible void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	/*
	 * Re-invoke __schedule() for as long as need_resched remains set,
	 * with preemption disabled across each call as __schedule() requires
	 * (re-enabled without a resched check to avoid recursion).
	 */
	do {
		preempt_disable();
		__schedule(false);
		sched_preempt_enable_no_resched();
	} while (need_resched());
}
EXPORT_SYMBOL(schedule);
/*
 * Handle pending work flags on the return path to user space
 * (reschedule, signal delivery, uprobes, notify-resume), looping until
 * no _TIF_WORK_MASK bits remain set.
 */
asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			/* Reschedule request takes priority over other work. */
			schedule();
		} else {
			/* Only deliver signals etc. when returning to user mode. */
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else if (thread_flags & _TIF_UPROBE) {
				uprobe_notify_resume(regs);
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		/* Re-sample the flags with IRQs off before deciding to loop. */
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}

内核态进中断返回会调用preempt_schedule_irq 


 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
	enum ctx_state prev_state;
 
	/* Catch callers which need to be fixed */
	BUG_ON(preempt_count() || !irqs_disabled());
 
	prev_state = exception_enter();
 
	do {
		preempt_disable();
		local_irq_enable();
		__schedule(true);
		local_irq_disable();
		sched_preempt_enable_no_resched();
	} while (need_resched());
 
	exception_exit(prev_state);

周期性调度器scheduler_tick

void scheduler_tick(void)
{
	/*
	 * NOTE(review): abbreviated excerpt — the full scheduler_tick()
	 * also derives 'rq' and 'curr' for this CPU and updates clocks and
	 * load before this call; only the per-class periodic tick hook is
	 * shown here.
	 */
	curr->sched_class->task_tick(rq, curr, 0);
}

对于fair算法

/* Excerpt: the fair (CFS) class wires its periodic tick to task_tick_fair(). */
const struct sched_class fair_sched_class = {
	.task_tick		= task_tick_fair,
};

/*
 * Periodic (scheduler-tick) hook of the fair class: walk the scheduling
 * entity hierarchy from the task's se upwards and run entity_tick() on
 * each level's cfs_rq.
 *
 * (Fixed: removed the unused local 'struct sched_domain *sd' — nothing
 * in this listing referenced it.)
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

/*
 * Per-entity periodic tick work: refresh runtime statistics and load
 * averages, then decide whether the current entity should be preempted.
 */
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	/*
	 * Ensure that runnable average is periodically updated.
	 */
	update_load_avg(curr, UPDATE_TG);
	update_cfs_shares(curr);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_curr(rq_of(cfs_rq));
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	/* Preemption is only meaningful when someone else is waiting. */
	if (cfs_rq->nr_running > 1)
		check_preempt_tick(cfs_rq, curr);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;
	struct sched_entity *se;
	s64 delta;

	/* How long curr has run in this slice vs. what its fair slice is. */
	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_curr(rq_of(cfs_rq));
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (delta_exec < sysctl_sched_min_granularity)
		return;

	/* Compare curr's vruntime against the leftmost (smallest) entity. */
	se = __pick_first_entity(cfs_rq);
	delta = curr->vruntime - se->vruntime;

	if (delta < 0)
		return;

	if (delta > ideal_runtime)
		resched_curr(rq_of(cfs_rq));
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	/* Already marked for reschedule: nothing to do. */
	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		/* Local CPU: set the flags directly; no IPI needed. */
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	/* Remote CPU: IPI it, unless the target polls need_resched itself. */
	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

try_to_wake_up设置_TIF_NEED_RESCHED标志的流程

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 * @sibling_count_hint: A hint at the number of threads that are being woken up
 *                      in this event.
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Return: %true if @p was woken up, %false if it was already running.
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
	       int sibling_count_hint)
{
	unsigned long flags;
	int cpu, success = 0;
#ifdef CONFIG_SMP
	struct rq *rq;
	u64 wallclock;
#endif

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	/* Only wake if p's state is one of those the caller asked for. */
	if (!(p->state & state))
		goto out;

	trace_sched_waking(p);

	success = 1; /* we're going to change ->state */
	cpu = task_cpu(p);

	/*
	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
	 * in smp_cond_load_acquire() below.
	 *
	 * sched_ttwu_pending()                 try_to_wake_up()
	 *   [S] p->on_rq = 1;                  [L] P->state
	 *       UNLOCK rq->lock  -----.
	 *                              \
	 *				 +---   RMB
	 * schedule()                   /
	 *       LOCK rq->lock    -----'
	 *       UNLOCK rq->lock
	 *
	 * [task p]
	 *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
	 *
	 * Pairs with the UNLOCK+LOCK on rq->lock from the
	 * last wakeup of our task and the schedule that got our task
	 * current.
	 */
	smp_rmb();
	/* Still queued: a lightweight "remote" wakeup is enough. */
	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

#ifdef CONFIG_SMP
	/*
	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
	 * possible to, falsely, observe p->on_cpu == 0.
	 *
	 * One must be running (->on_cpu == 1) in order to remove oneself
	 * from the runqueue.
	 *
	 *  [S] ->on_cpu = 1;	[L] ->on_rq
	 *      UNLOCK rq->lock
	 *			RMB
	 *      LOCK   rq->lock
	 *  [S] ->on_rq = 0;    [L] ->on_cpu
	 *
	 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
	 * from the consecutive calls to schedule(); the first switching to our
	 * task, the second putting it to sleep.
	 */
	smp_rmb();

	/*
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until its done referencing the task.
	 */
	while (p->on_cpu)
		cpu_relax();
	/*
	 * Combined with the control dependency above, we have an effective
	 * smp_load_acquire() without the need for full barriers.
	 *
	 * Pairs with the smp_store_release() in finish_lock_switch().
	 *
	 * This ensures that tasks getting woken will be fully ordered against
	 * their previous state and preserve Program Order.
	 */
	smp_rmb();

	/* WALT accounting on the (old) runqueue before a possible migration. */
	rq = cpu_rq(task_cpu(p));

	raw_spin_lock(&rq->lock);
	wallclock = walt_ktime_clock();
	walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
	raw_spin_unlock(&rq->lock);

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;

	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	/* Pick a target CPU; migrate p if it differs from its current one. */
	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags,
			     sibling_count_hint);
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;
		set_task_cpu(p, cpu);
	}

#endif /* CONFIG_SMP */

	/* Enqueue p on cpu's runqueue (directly, or via a remote wake list). */
	ttwu_queue(p, cpu);
stat:
	ttwu_stat(p, cpu, wake_flags);
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return success;
}


/*
 * Queue the woken task @p onto @cpu's runqueue: via a remote wake-list
 * IPI when the CPUs don't share cache (and TTWU_QUEUE is enabled),
 * otherwise by activating it directly under the rq lock.
 */
static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
}

/*
 * With rq->lock held: adjust the uninterruptible count if @p contributed
 * to load, enqueue it, then complete the wakeup (mark TASK_RUNNING and
 * check for preemption).
 */
static void ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
	lockdep_assert_held(&rq->lock);

#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Final step of a wakeup: test whether @p should preempt rq->curr,
 * mark @p runnable, run the class's task_woken hook and refresh the
 * rq's average-idle statistics.
 */
static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	/* May set TIF_NEED_RESCHED on rq->curr if p should preempt it. */
	check_preempt_curr(rq, p, wake_flags);
	p->state = TASK_RUNNING;
	trace_sched_wakeup(p);

#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Our task @p is fully woken up and running; so its safe to
		 * drop the rq->lock, hereafter rq is only used for statistics.
		 */
		lockdep_unpin_lock(&rq->lock);
		p->sched_class->task_woken(rq, p);
		lockdep_pin_lock(&rq->lock);
	}

	/* The rq was idle; fold this idle period into avg_idle (capped). */
	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->idle_stamp = 0;
	}
#endif
}

/*
 * Decide whether the newly runnable @p should preempt rq->curr.
 * Same class: delegate to the class's own check_preempt_curr hook.
 * Different classes: walk the class list from highest priority; if @p's
 * class is found before curr's, @p outranks curr and curr is resched'ed.
 */
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq, true);
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 *
 * NOTE(review): this is a duplicate of the resched_curr listing quoted
 * earlier in this article; kept here because the surrounding prose
 * references it from the wakeup path.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	/* Already marked for reschedule: nothing to do. */
	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		/* Local CPU: set the flags directly; no IPI needed. */
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	/* Remote CPU: IPI it, unless the target polls need_resched itself. */
	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

个人总结的__schedule流程如下

网上已有写得很详细的调度器分析文章,可参考:

https://blog.youkuaiyun.com/gatieme/article/details/51456569

https://www.jianshu.com/p/d1de1f293e3b

/***************************************************************************** * Copyright (c) 2022, Nations Technologies Inc. * * All rights reserved. * ****************************************************************************/ /** * @file sys_var.c * @author Nations * @version V1.2.2 */ #include <stdio.h> #include "sys_var.h" uint8_t adc_work_mode = 0; // 工作模式,gpio,adc1,adc2 uint8_t mode_state = 0; // 工作模式 uint8_t strength_Led_state = 0; // 工作模式 uint8_t strength_Level = 0; // 工作模式 // 气囊状态 AIR_BAG_STATE air_bag_state; AIR_BAG_STATE air_bag_state_before; volatile struct _schedule_stack_ schedule_stack[STACK_SIZE] = { 0}; // 气囊工作调度步骤数 char seat_func_step_count[SEAT_FUNC_MODE_AMOUNT] = { SEAT_FUNC_STEP_COUNT_IDLE, SEAT_FUNC_STEP_COUNT_CYCLE_1, SEAT_FUNC_STEP_COUNT_CYCLE_2, SEAT_FUNC_STEP_COUNT_CYCLE_3, SEAT_FUNC_STEP_COUNT_CYCLE_4, SEAT_FUNC_STEP_COUNT_CYCLE_5, SEAT_FUNC_STEP_COUNT_CYCLE_6, SEAT_FUNC_STEP_COUNT_CYCLE_7, SEAT_FUNC_STEP_COUNT_CYCLE_8, }; // 按摩工作总时长 uint32_t work_mode_total_duration[SEAT_FUNC_MODE_AMOUNT] = { CFG_MODE_KEEP_TIME_IDLE, CFG_MODE_KEEP_TIME_CYCLE_1, CFG_MODE_KEEP_TIME_CYCLE_2, CFG_MODE_KEEP_TIME_CYCLE_3, CFG_MODE_KEEP_TIME_CYCLE_4, CFG_MODE_KEEP_TIME_CYCLE_5, CFG_MODE_KEEP_TIME_CYCLE_6, CFG_MODE_KEEP_TIME_CYCLE_7, CFG_MODE_KEEP_TIME_CYCLE_8, }; // 模式0 气囊工作调度表 struct _air_bag_schedule_ air_bag_schedule_idle[SEAT_FUNC_STEP_COUNT_IDLE] = { {0, 0, 0, 0, 0, 0, 0}, }; #ifdef FY_SEAT_4 // 模式 1 struct _air_bag_schedule_ air_bag_schedule_cycle_1[SEAT_FUNC_STEP_COUNT_CYCLE_1] = { // 简单滚动模式 {0, 3, SCHEDULE_AIR_BAG_INDEX_1, 6000, 0, 6000, 6000}, {1, 3, SCHEDULE_AIR_BAG_INDEX_2, 6000, 0, 6000, 6000}, {2, 3, SCHEDULE_AIR_BAG_INDEX_3, 6000, 0, 6000, 6000}, {3, 3, SCHEDULE_AIR_BAG_INDEX_4, 6000, 0, 6000, 6000}, }; // 模式 2 struct _air_bag_schedule_ air_bag_schedule_cycle_2[SEAT_FUNC_STEP_COUNT_CYCLE_2] = { {0, 1, SCHEDULE_AIR_BAG_INDEX_1 | SCHEDULE_AIR_BAG_INDEX_2, 6000, 0, 6000, 6000}, {1, 1, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_4, 6000, 0, 
6000, 6000}, }; // 模式 3 struct _air_bag_schedule_ air_bag_schedule_cycle_3[SEAT_FUNC_STEP_COUNT_CYCLE_3] = { // 简单滚动模式 {0, 4, SCHEDULE_AIR_BAG_INDEX_1, 6000, 0, 6000, 6000}, {1, 4, SCHEDULE_AIR_BAG_INDEX_3, 6000, 0, 6000, 6000}, {2, 4, SCHEDULE_AIR_BAG_INDEX_2, 6000, 0, 6000, 6000}, {3, 4, SCHEDULE_AIR_BAG_INDEX_4, 6000, 0, 6000, 6000}, }; #else // 模式 1 struct _air_bag_schedule_ air_bag_schedule_cycle_1[SEAT_FUNC_STEP_COUNT_CYCLE_1] = { {0, 9, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, 8000, 0, 8000, 8000}, {1, 9, SCHEDULE_AIR_BAG_INDEX_5 | SCHEDULE_AIR_BAG_INDEX_8, 8000, 0, 8000, 8000}, {2, 9, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_9, 8000, 0, 8000, 8000}, {3, 9, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_10, 8000, 0, 8000, 8000}, {4, 9, SCHEDULE_AIR_BAG_INDEX_2, 8000, 0, 8000, 8000}, {5, 9, SCHEDULE_AIR_BAG_INDEX_1, 8000, 0, 8000, 8000}, {6, 9, SCHEDULE_AIR_BAG_INDEX_2, 8000, 0, 8000, 8000}, {7, 9, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_10, 8000, 0, 8000, 8000}, {8, 9, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_9, 8000, 0, 8000, 8000}, {9, 9, SCHEDULE_AIR_BAG_INDEX_5 | SCHEDULE_AIR_BAG_INDEX_8, 8000, 0, 8000, 8000}, // {9, 9, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, 8000, 0, 8000, 8000}, }; // 模式 2 struct _air_bag_schedule_ air_bag_schedule_cycle_2[SEAT_FUNC_STEP_COUNT_CYCLE_2] = { {0, 3, SCHEDULE_AIR_BAG_INDEX_5|SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7|SCHEDULE_AIR_BAG_INDEX_8, 10000, 0, 10000, 10000}, {1, 3, SCHEDULE_AIR_BAG_INDEX_3|SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_9|SCHEDULE_AIR_BAG_INDEX_10, 10000, 0, 10000, 10000}, {2, 3, SCHEDULE_AIR_BAG_INDEX_2 | SCHEDULE_AIR_BAG_INDEX_1, 10000, 0, 10000, 10000}, {3, 3, SCHEDULE_AIR_BAG_INDEX_3|SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_9|SCHEDULE_AIR_BAG_INDEX_10, 10000, 0, 10000, 10000}, // {4, 4, SCHEDULE_AIR_BAG_INDEX_5|SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7|SCHEDULE_AIR_BAG_INDEX_8, 11000, 0, 10000, 10000}, }; // 
模式 3 struct _air_bag_schedule_ air_bag_schedule_cycle_3[SEAT_FUNC_STEP_COUNT_CYCLE_3] = { {0, 9, SCHEDULE_AIR_BAG_INDEX_5 | SCHEDULE_AIR_BAG_INDEX_7, 8000, 0, 8000, 8000}, {1, 9, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_8, 8000, 0, 8000, 8000}, {2, 9, SCHEDULE_AIR_BAG_INDEX_3| SCHEDULE_AIR_BAG_INDEX_9, 8000, 0, 8000, 8000}, {3, 9, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_10, 8000, 0, 8000, 8000}, {4, 9, SCHEDULE_AIR_BAG_INDEX_2, 8000, 0, 8000, 8000}, {5, 9, SCHEDULE_AIR_BAG_INDEX_1, 8000, 0, 8000, 8000}, {6, 9, SCHEDULE_AIR_BAG_INDEX_2, 8000, 0, 8000, 8000}, {7, 9, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_10, 8000, 0, 8000, 8000}, {8, 9, SCHEDULE_AIR_BAG_INDEX_3| SCHEDULE_AIR_BAG_INDEX_9, 8000, 0, 8000, 8000}, {9, 9, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_8, 8000, 0, 8000, 8000}, }; #endif // 模式 4 struct _air_bag_schedule_ air_bag_schedule_cycle_4[SEAT_FUNC_STEP_COUNT_CYCLE_4] = { // 复杂滚动模式 {0, 12, SCHEDULE_AIR_BAG_INDEX_10, 4000, 0, 4000, 4000}, {1, 12, SCHEDULE_AIR_BAG_INDEX_3, 4000, 0, 4000, 4000}, {2, 12,SCHEDULE_AIR_BAG_INDEX_4, 4000, 0, 4000, 4000}, {3, 12, SCHEDULE_AIR_BAG_INDEX_9, 4000, 0, 4000, 4000}, {4, 12, SCHEDULE_AIR_BAG_INDEX_8, 4000, 0, 4000, 4000}, {5, 12, SCHEDULE_AIR_BAG_INDEX_5, 4000, 0, 4000, 4000}, {6, 12, SCHEDULE_AIR_BAG_INDEX_6, 4000, 0, 4000, 4000}, {7, 12, SCHEDULE_AIR_BAG_INDEX_7, 4000, 0, 4000, 4000}, {8, 12, SCHEDULE_AIR_BAG_INDEX_5, 4000, 0, 4000, 4000}, {9, 12, SCHEDULE_AIR_BAG_INDEX_4, 4000, 0, 4000, 4000}, {10, 12, SCHEDULE_AIR_BAG_INDEX_9, 4000, 0, 4000, 4000}, {11, 12, SCHEDULE_AIR_BAG_INDEX_10, 4000, 0, 4000, 4000}, {12, 12, SCHEDULE_AIR_BAG_INDEX_3, 4000, 0, 4000, 4000}, }; struct _air_bag_schedule_ air_bag_schedule_cycle_5[SEAT_FUNC_STEP_COUNT_CYCLE_5] = { {0, 28, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_10, 4000, 0, 4000, 4000}, {1, 28, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_9, 4000, 0, 4000, 4000}, {2, 28, SCHEDULE_AIR_BAG_INDEX_5 | SCHEDULE_AIR_BAG_INDEX_8, 4000, 0, 
4000, 4000}, {3, 28, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, 4000, 0, 4000, 4000}, {4, 28, SCHEDULE_AIR_BAG_INDEX_10, 4000, 0, 4000, 4000}, {5, 28, SCHEDULE_AIR_BAG_INDEX_4, 4000, 0, 4000, 4000}, {6, 28, SCHEDULE_AIR_BAG_INDEX_8, 4000, 0, 4000, 4000}, {7, 28, SCHEDULE_AIR_BAG_INDEX_6, 4000, 0, 4000, 4000}, {8, 28, SCHEDULE_AIR_BAG_INDEX_7, 4000, 0, 4000, 4000}, {9, 28, SCHEDULE_AIR_BAG_INDEX_3, 4000, 0, 4000, 4000}, {10, 28, SCHEDULE_AIR_BAG_INDEX_10, 4000, 0, 4000, 4000}, {11, 28, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_9, 4000, 0, 4000, 4000}, {12, 28, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_8, 4000, 0, 4000, 4000}, {13, 28, SCHEDULE_AIR_BAG_INDEX_7 | SCHEDULE_AIR_BAG_INDEX_5, 4000, 0, 4000, 4000}, {14, 28, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_10, 4000, 0, 4000, 4000}, {15, 28, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, 4000, 0, 4000, 4000}, {16, 28, SCHEDULE_AIR_BAG_INDEX_10, 4000, 0, 4000, 4000}, {17, 28, SCHEDULE_AIR_BAG_INDEX_3, 4000, 0, 4000, 4000}, {18, 28,SCHEDULE_AIR_BAG_INDEX_4, 4000, 0, 4000, 4000}, {19, 28, SCHEDULE_AIR_BAG_INDEX_9, 4000, 0, 4000, 4000}, {20, 28, SCHEDULE_AIR_BAG_INDEX_8, 4000, 0, 4000, 4000}, {21, 28, SCHEDULE_AIR_BAG_INDEX_5, 4000, 0, 4000, 4000}, {22, 28, SCHEDULE_AIR_BAG_INDEX_6, 4000, 0, 4000, 4000}, {23, 28, SCHEDULE_AIR_BAG_INDEX_7, 4000, 0, 4000, 4000}, {24, 28, SCHEDULE_AIR_BAG_INDEX_5, 4000, 0, 4000, 4000}, {25, 28, SCHEDULE_AIR_BAG_INDEX_4, 4000, 0, 4000, 4000}, {26, 28, SCHEDULE_AIR_BAG_INDEX_9, 4000, 0, 4000, 4000}, {27, 28, SCHEDULE_AIR_BAG_INDEX_10, 4000, 0, 4000, 4000}, {28, 28, SCHEDULE_AIR_BAG_INDEX_3, 4000, 0, 4000, 4000}, }; // 模式 6 struct _air_bag_schedule_ air_bag_schedule_cycle_6[SEAT_FUNC_STEP_COUNT_CYCLE_6] = { // 复杂滚动模式 {0, 1, SCHEDULE_AIR_BAG_INDEX_2 | SCHEDULE_AIR_BAG_INDEX_7, 6600, 0, 6600, 6600}, {1, 1, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_5, 6600, 0, 6600, 6600}, }; // 模式 7 struct _air_bag_schedule_ 
air_bag_schedule_cycle_7[SEAT_FUNC_STEP_COUNT_CYCLE_7] = { // 简单滚动模式 {0, 7, SCHEDULE_AIR_BAG_INDEX_1, 3000, 0, 3000, 3000}, {1, 7, SCHEDULE_AIR_BAG_INDEX_8, 3000, 0, 3000, 3000}, {2, 7, SCHEDULE_AIR_BAG_INDEX_2, 3000, 0, 3000, 3000}, {3, 7, SCHEDULE_AIR_BAG_INDEX_7, 3000, 0, 3000, 3000}, {4, 7, SCHEDULE_AIR_BAG_INDEX_3, 3000, 0, 3000, 3000}, {5, 7, SCHEDULE_AIR_BAG_INDEX_6, 3000, 0, 3000, 3000}, {6, 7, SCHEDULE_AIR_BAG_INDEX_4, 3000, 0, 3000, 3000}, {7, 7, SCHEDULE_AIR_BAG_INDEX_5, 3000, 0, 3000, 3000}, }; //8888888 // 模式 7 struct _air_bag_schedule_ air_bag_schedule_cycle_8[SEAT_FUNC_STEP_COUNT_CYCLE_8] = { // 简单滚动模式 {0, 2, SCHEDULE_AIR_BAG_INDEX_1, 3000, 0, 3000, 3000}, {1, 2, SCHEDULE_AIR_BAG_INDEX_8, 3000, 0, 3000, 3000}, {2, 2, SCHEDULE_AIR_BAG_INDEX_8, 3000, 0, 3000, 3000}, }; //////////////////Ç¿¶È1//////////////////////////////////////////////////////// // 模式 1 struct _air_bag_schedule_ air_bag_schedule_lvevel1_cycle_1[4] = { {0, 3, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {1, 3, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {2, 3, SCHEDULE_AIR_BAG_INDEX_5 | SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {3, 3, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, }; // 模式 2 struct _air_bag_schedule_ air_bag_schedule_lvevel1_cycle_2[8] = { {0, 7, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {1, 7, SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {2, 7, SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {3, 7, SCHEDULE_AIR_BAG_INDEX_6, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {4, 7, SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {5, 7, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {6, 7, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {7, 7, 
SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, }; // 模式 3 struct _air_bag_schedule_ air_bag_schedule_lvevel1_cycle_3[4] = { {0, 3, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {1, 3, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {2, 3, SCHEDULE_AIR_BAG_INDEX_7 | SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {3, 3, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, // {4, 4, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, }; // 模式 4 struct _air_bag_schedule_ air_bag_schedule_lvevel1_cycle_4[SEAT_FUNC_STEP_COUNT_CYCLE_4] = { {0, 12, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {1, 12, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {2, 12,SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {3, 12, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {4, 12, SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {5, 12, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {6, 12, SCHEDULE_AIR_BAG_INDEX_6, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {7, 12, SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {8, 12, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {9, 12, SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {10, 12, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {11, 12, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {12, 12, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, }; struct _air_bag_schedule_ air_bag_schedule_lvevel1_cycle_5[SEAT_FUNC_STEP_COUNT_CYCLE_5] = { {0, 28, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {1, 28, 
SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {2, 28, SCHEDULE_AIR_BAG_INDEX_5 | SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {3, 28, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {4, 28, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {5, 28, SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {6, 28, SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {7, 28, SCHEDULE_AIR_BAG_INDEX_6, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {8, 28, SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {9, 28, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {10, 28, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {11, 28, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {12, 28, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {13, 28, SCHEDULE_AIR_BAG_INDEX_7 | SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {14, 28, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {15, 28, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {16, 28, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {17, 28, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {18, 28,SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {19, 28, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {20, 28, SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {21, 28, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {22, 28, SCHEDULE_AIR_BAG_INDEX_6, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {23, 28, SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, 
{24, 28, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {25, 28, SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {26, 28, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {27, 28, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, {28, 28, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_1, 0, AIR_LEVEL_1, AIR_LEVEL_1}, }; //////////////////Ç¿¶È2//////////////////////////////////////////////////////// struct _air_bag_schedule_ air_bag_schedule_lvevel2_cycle_1[4] = { {0, 3, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {1, 3, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {2, 3, SCHEDULE_AIR_BAG_INDEX_5 | SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {3, 3, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, }; // 模式 2 struct _air_bag_schedule_ air_bag_schedule_lvevel2_cycle_2[8] = { {0, 7, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {1, 7, SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {2, 7, SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {3, 7, SCHEDULE_AIR_BAG_INDEX_6, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {4, 7, SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {5, 7, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {6, 7, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {7, 7, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, }; // 3 struct _air_bag_schedule_ air_bag_schedule_lvevel2_cycle_3[4] = { {0, 3, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {1, 3, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {2, 3, SCHEDULE_AIR_BAG_INDEX_7 | SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_2, 0, 
AIR_LEVEL_2, AIR_LEVEL_2}, {3, 3, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, //{4, 4, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, }; // 4 struct _air_bag_schedule_ air_bag_schedule_lvevel2_cycle_4[SEAT_FUNC_STEP_COUNT_CYCLE_4] = { {0, 12, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {1, 12, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {2, 12,SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {3, 12, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {4, 12, SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {5, 12, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {6, 12, SCHEDULE_AIR_BAG_INDEX_6, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {7, 12, SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {8, 12, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {9, 12, SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {10, 12, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {11, 12, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {12, 12, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, }; struct _air_bag_schedule_ air_bag_schedule_lvevel2_cycle_5[SEAT_FUNC_STEP_COUNT_CYCLE_5] = { {0, 28, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {1, 28, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {2, 28, SCHEDULE_AIR_BAG_INDEX_5 | SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {3, 28, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {4, 28, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {5, 28, SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_2, 0, AIR_LEVEL_2, 
AIR_LEVEL_2}, {6, 28, SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {7, 28, SCHEDULE_AIR_BAG_INDEX_6, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {8, 28, SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {9, 28, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {10, 28, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {11, 28, SCHEDULE_AIR_BAG_INDEX_3 | SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {12, 28, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {13, 28, SCHEDULE_AIR_BAG_INDEX_7 | SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {14, 28, SCHEDULE_AIR_BAG_INDEX_4 | SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {15, 28, SCHEDULE_AIR_BAG_INDEX_6 | SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {16, 28, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {17, 28, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {18, 28,SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {19, 28, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {20, 28, SCHEDULE_AIR_BAG_INDEX_8, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {21, 28, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {22, 28, SCHEDULE_AIR_BAG_INDEX_6, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {23, 28, SCHEDULE_AIR_BAG_INDEX_7, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {24, 28, SCHEDULE_AIR_BAG_INDEX_5, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {25, 28, SCHEDULE_AIR_BAG_INDEX_4, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {26, 28, SCHEDULE_AIR_BAG_INDEX_9, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {27, 28, SCHEDULE_AIR_BAG_INDEX_10, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, {28, 28, SCHEDULE_AIR_BAG_INDEX_3, AIR_LEVEL_2, 0, AIR_LEVEL_2, AIR_LEVEL_2}, }; // 工作模式 调度表 struct _air_bag_schedule_ 
*air_bag_schedule_table[SEAT_FUNC_MODE_AMOUNT] = { air_bag_schedule_idle, air_bag_schedule_cycle_1, air_bag_schedule_cycle_2, air_bag_schedule_cycle_3, air_bag_schedule_cycle_4, air_bag_schedule_cycle_5, air_bag_schedule_cycle_6, air_bag_schedule_cycle_7, air_bag_schedule_cycle_8, }; struct _air_bag_schedule_ *air_bag_schedule1_table[SEAT_FUNC_MODE_AMOUNT] = { air_bag_schedule_idle, air_bag_schedule_lvevel1_cycle_1, air_bag_schedule_lvevel1_cycle_2, air_bag_schedule_lvevel1_cycle_3, air_bag_schedule_lvevel1_cycle_4, air_bag_schedule_lvevel1_cycle_5, air_bag_schedule_cycle_6, air_bag_schedule_cycle_7, air_bag_schedule_cycle_8, }; struct _air_bag_schedule_ *air_bag_schedule2_table[SEAT_FUNC_MODE_AMOUNT] = { air_bag_schedule_idle, air_bag_schedule_lvevel2_cycle_1, air_bag_schedule_lvevel2_cycle_2, air_bag_schedule_lvevel2_cycle_3, air_bag_schedule_lvevel2_cycle_4, air_bag_schedule_lvevel2_cycle_5, air_bag_schedule_cycle_6, air_bag_schedule_cycle_7, air_bag_schedule_cycle_8, }; // 气囊工作调度状态 struct _air_bag_ctrl_ air_bag_ctrl = { 0, 0, 0, 0, 0, 0, // u8 current_mode; //气囊调度模式 // struct _air_bag_schedule_ *schedule_table_pntr; //调度表指针 // u8 schedule_table_size; //调度表大小 // u8 current_schedule_index; //气囊当前调度时序索引 // u32 current_schedule_duration; //气囊当前调度时序持续时间 // u8 current_repeat_start_index; //重复开始索引 // u8 current_repeat_end_index; //重复结束索引 // u8 current_repeat_times; //重复组索引 }; void statck_pop(int stack_idx) { schedule_stack[stack_idx].OpAirBagGroup = 0; schedule_stack[stack_idx].InDuration = 0; schedule_stack[stack_idx].HoldDuration = 0; schedule_stack[stack_idx].OutDuration = 0; schedule_stack[stack_idx].NextOpDuration = 0; schedule_stack[stack_idx].sch_idx_max = 0; schedule_stack[stack_idx].sch_idx = 0; schedule_stack[stack_idx].IsUsed = 0; } void statck_push(struct _air_bag_schedule_ *schedule) { int i; for (i = 0; i < STACK_SIZE; i++) { if (schedule_stack[i].IsUsed == 0) { schedule_stack[i].HoldDuration = schedule->HoldDuration; schedule_stack[i].OpAirBagGroup = 
schedule->OpAirBagGroup; schedule_stack[i].InDuration = schedule->InDuration; schedule_stack[i].OutDuration = schedule->OutDuration; schedule_stack[i].NextOpDuration = schedule->NextOpDuration; schedule_stack[i].sch_idx = schedule->sch_idx; schedule_stack[i].sch_idx_max = schedule->sch_idx_max; schedule_stack[i].IsUsed = 1; break; } } } void statck_init(void) { int i; for (i = 0; i < STACK_SIZE; i++) { schedule_stack[i].HoldDuration = 0; schedule_stack[i].OpAirBagGroup = 0; schedule_stack[i].InDuration = 0; schedule_stack[i].OutDuration = 0; schedule_stack[i].NextOpDuration = 0; schedule_stack[i].sch_idx = 0; schedule_stack[i].sch_idx_max = 0; schedule_stack[i].IsUsed = 0; } } void air_bag_set_in(uint16_t air_bag_group) { if (air_bag_group & AIR_BAG_MASK) MOTOR_ON else MOTOR_OFF; #if 0 //双气囊滚动正常 //简单滚动正常 if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_1 ) SWITCH_1_ON else SWITCH_1_OFF ; if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_2 ) SWITCH_2_ON else SWITCH_2_OFF ; if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_3 ) SWITCH_3_ON else SWITCH_3_OFF ; if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_4 ) SWITCH_4_ON else SWITCH_4_OFF ; if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_5 ) SWITCH_5_ON else SWITCH_5_OFF ; if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_6 ) SWITCH_6_ON else SWITCH_6_OFF ; if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_7 ) SWITCH_7_ON else SWITCH_7_OFF ; if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_8 ) SWITCH_8_ON else SWITCH_8_OFF ; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_9 ) SWITCH_9_ON else SWITCH_9_OFF ; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_10) SWITCH_10_ON else SWITCH_10_OFF ; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_11) SWITCH_11_ON else SWITCH_11_OFF ; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_12) SWITCH_12_ON else SWITCH_12_OFF ; #else // 三气囊翻滚模式正常 // 简单滚动正常 // 双气囊滚动正常 if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_1) SWITCH_1_ON; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_2) SWITCH_2_ON; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_3) SWITCH_3_ON; if 
(air_bag_group & SCHEDULE_AIR_BAG_INDEX_4) SWITCH_4_ON; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_5) SWITCH_5_ON; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_6) SWITCH_6_ON; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_7) SWITCH_7_ON; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_8) SWITCH_8_ON; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_9) SWITCH_9_ON; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_10) SWITCH_10_ON; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_11) SWITCH_11_ON else SWITCH_11_OFF ; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_12) SWITCH_12_ON else SWITCH_12_OFF ; #endif } void air_bag_set_out(uint16_t air_bag_group) { // if(air_bag_group & AIR_BAG_MASK ) MOTOR_ON else MOTOR_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_1) SWITCH_1_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_2) SWITCH_2_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_3) SWITCH_3_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_4) SWITCH_4_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_5) SWITCH_5_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_6) SWITCH_6_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_7) SWITCH_7_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_8) SWITCH_8_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_9) SWITCH_9_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_10) SWITCH_10_OFF; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_11) SWITCH_11_OFF; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_12) SWITCH_12_OFF; } void air_bag_set_idle(uint16_t air_bag_group) { if (air_bag_group & AIR_BAG_MASK) MOTOR_ON else MOTOR_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_1) SWITCH_1_ON else SWITCH_1_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_2) SWITCH_2_ON else SWITCH_2_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_3) SWITCH_3_ON else SWITCH_3_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_4) SWITCH_4_ON else SWITCH_4_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_5) SWITCH_5_ON else SWITCH_5_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_6) SWITCH_6_ON else SWITCH_6_OFF; if 
(air_bag_group & SCHEDULE_AIR_BAG_INDEX_7) SWITCH_7_ON else SWITCH_7_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_8) SWITCH_8_ON else SWITCH_8_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_9) SWITCH_9_ON else SWITCH_9_OFF; if (air_bag_group & SCHEDULE_AIR_BAG_INDEX_10) SWITCH_10_ON else SWITCH_10_OFF; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_11) SWITCH_11_ON else SWITCH_11_OFF; // if(air_bag_group & SCHEDULE_AIR_BAG_INDEX_12) SWITCH_12_ON else SWITCH_12_OFF; } /** * @} */ /** * @} */
10-24
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值