-
Notifications
You must be signed in to change notification settings - Fork 349
schedule: zephyr_dma_domain: Fix various issues #7672
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
ae98ac2
f69dd35
cbae300
481766f
2389f13
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -195,13 +195,10 @@ static inline void domain_disable(struct ll_schedule_domain *domain, int core) | |
| static inline bool domain_is_pending(struct ll_schedule_domain *domain, | ||
| struct task *task, struct comp_dev **comp) | ||
| { | ||
| bool ret; | ||
|
|
||
| assert(domain->ops->domain_is_pending); | ||
|
|
||
| ret = domain->ops->domain_is_pending(domain, task, comp); | ||
|
|
||
| return ret; | ||
| if (domain->ops->domain_is_pending) | ||
| return domain->ops->domain_is_pending(domain, task, comp); | ||
| else | ||
| return true; | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Usual pattern here is to remove the |
||
| } | ||
|
|
||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -52,6 +52,31 @@ struct zephyr_dma_domain_data { | |
| struct dma_chan_data *channel; /* channel data */ | ||
| bool enabled_irq; /* true if DMA IRQ was already enabled */ | ||
| struct zephyr_dma_domain_thread *dt; | ||
| /* set to true by the DMA IRQ handler when an IRQ for | ||
| * associated channel was triggered. | ||
| */ | ||
| bool triggered; | ||
| /* used to keep track of how many pipeline tasks point | ||
| * to the same data structure. | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. is it the same as just "number of tasks sharing the same data structure"?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yep. |
||
| */ | ||
| int usage_count; | ||
| /* used to keep track of how many times data->triggered | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. can we similarly simplify this and remove "used to keep track of?"
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Sure |
||
| * can be left set to true before resetting it to false. | ||
| * | ||
| * this is needed for mixer topologies because the | ||
| * non-registrable pipeline tasks need to be executed | ||
| * when an IRQ comes for the registrable pipeline task. | ||
| */ | ||
| int pending_count; | ||
| /* used when linking a data structure to a non-registrable | ||
| * pipeline task. | ||
| * | ||
| * in the case of mixer, the non-registrable pipeline tasks | ||
| * have the same scheduling component as the registrable | ||
| * pipeline task so this can be used as a criteria to attach | ||
| * the data structure to the non-registrable pipeline task. | ||
| */ | ||
| struct comp_dev *sched_comp; | ||
| }; | ||
|
|
||
| struct zephyr_dma_domain_thread { | ||
|
|
@@ -80,11 +105,14 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain, | |
| uint32_t num_tasks); | ||
| static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain, | ||
| struct task *task); | ||
| static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain, | ||
| struct task *task, struct comp_dev **comp); | ||
|
|
||
| static const struct ll_schedule_domain_ops zephyr_dma_domain_ops = { | ||
| .domain_register = zephyr_dma_domain_register, | ||
| .domain_unregister = zephyr_dma_domain_unregister, | ||
| .domain_task_cancel = zephyr_dma_domain_task_cancel | ||
| .domain_task_cancel = zephyr_dma_domain_task_cancel, | ||
| .domain_is_pending = zephyr_dma_domain_is_pending, | ||
| }; | ||
|
|
||
| struct ll_schedule_domain *zephyr_dma_domain_init(struct dma *dma_array, | ||
|
|
@@ -143,13 +171,21 @@ static void dma_irq_handler(void *data) | |
| irq = zephyr_dma_domain_data->irq; | ||
| dt = zephyr_dma_domain_data->dt; | ||
|
|
||
| /* clear IRQ */ | ||
| dma_interrupt_legacy(channel, DMA_IRQ_CLEAR); | ||
| interrupt_clear_mask(irq, BIT(channel_index)); | ||
| /* was the IRQ triggered by this channel? */ | ||
| if (dma_interrupt_legacy(channel, DMA_IRQ_STATUS_GET)) { | ||
| /* IRQ is for this channel. Make sure that the scheduler | ||
| * can schedule the associated pipeline task. | ||
| */ | ||
| zephyr_dma_domain_data->triggered = true; | ||
|
|
||
| /* give resources to thread semaphore */ | ||
| if (dt->handler) | ||
| k_sem_give(sem); | ||
| /* clear IRQ */ | ||
| dma_interrupt_legacy(channel, DMA_IRQ_CLEAR); | ||
| interrupt_clear_mask(irq, BIT(channel_index)); | ||
|
|
||
| /* give resources to thread semaphore */ | ||
| if (dt->handler) | ||
| k_sem_give(sem); | ||
| } | ||
| } | ||
|
|
||
| static int enable_dma_irq(struct zephyr_dma_domain_data *data) | ||
|
|
@@ -204,6 +240,24 @@ static int register_dma_irq(struct zephyr_dma_domain *domain, | |
| if (core != crt_chan->core) | ||
| continue; | ||
|
|
||
| if ((crt_data->sched_comp == pipe_task->sched_comp || | ||
| !crt_data->sched_comp) && !pipe_task->registrable) { | ||
| /* attach crt_data to current pipeline | ||
| * task only if there's a match between | ||
| * the scheduling components or i | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. "if" |
||
| * crt_data's sched_comp field wasn't | ||
| * set yet. | ||
| */ | ||
| pipe_task->priv = crt_data; | ||
|
|
||
| crt_data->usage_count++; | ||
| crt_data->pending_count++; | ||
|
|
||
| return 0; | ||
| } else if (!pipe_task->registrable) { | ||
| return 0; | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. can we do this: |
||
| } | ||
|
|
||
| /* skip if IRQ was already registered */ | ||
| if (crt_data->enabled_irq) | ||
| continue; | ||
|
|
@@ -215,6 +269,11 @@ static int register_dma_irq(struct zephyr_dma_domain *domain, | |
| crt_data->irq = irq; | ||
| crt_data->channel = crt_chan; | ||
| crt_data->dt = dt; | ||
| crt_data->sched_comp = pipe_task->sched_comp; | ||
| crt_data->usage_count++; | ||
| crt_data->pending_count++; | ||
|
|
||
| pipe_task->priv = crt_data; | ||
|
|
||
| if (dt->started) { | ||
| /* if the Zephyr thread was started, we | ||
|
|
@@ -261,16 +320,6 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain, | |
|
|
||
| tr_info(&ll_tr, "zephyr_dma_domain_register()"); | ||
|
|
||
| /* don't even bother trying to register DMA IRQ for | ||
| * non-registrable tasks. | ||
| * | ||
| * this is needed because zephyr_dma_domain_register() will | ||
| * return -EINVAL for non-registrable tasks because of | ||
| * register_dma_irq() which is not right. | ||
| */ | ||
| if (!pipe_task->registrable) | ||
| return 0; | ||
|
|
||
| /* the DMA IRQ has to be registered before the Zephyr thread is | ||
| * started. | ||
| * | ||
|
|
@@ -285,12 +334,16 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain, | |
|
|
||
| if (ret < 0) { | ||
| tr_err(&ll_tr, | ||
| "failed to register DMA IRQ for pipe task %p on core %d", | ||
| pipe_task, core); | ||
| "failed to register DMA IRQ for pipe task %p on core %d reg: %d", | ||
| pipe_task, core, pipe_task->registrable); | ||
|
|
||
| return ret; | ||
| } | ||
|
|
||
| /* don't let non-registrable tasks go further */ | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. this comment doesn't add any information - it simply restates the next statement. If a comment is needed here, it should explain something that isn't obvious from the code
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. You're right. Will remove or rework it. |
||
| if (!pipe_task->registrable) | ||
| return 0; | ||
|
|
||
| /* skip if Zephyr thread was already started on this core */ | ||
| if (dt->handler) | ||
| return 0; | ||
|
|
@@ -429,15 +482,20 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain, | |
| struct zephyr_dma_domain *zephyr_dma_domain; | ||
| struct zephyr_dma_domain_thread *dt; | ||
| struct pipeline_task *pipe_task; | ||
| struct zephyr_dma_domain_data *data; | ||
| int ret, core; | ||
|
|
||
| zephyr_dma_domain = ll_sch_get_pdata(domain); | ||
| pipe_task = pipeline_task_get(task); | ||
| core = cpu_get_id(); | ||
| dt = zephyr_dma_domain->domain_thread + core; | ||
| data = pipe_task->priv; | ||
|
|
||
| tr_info(&ll_tr, "zephyr_dma_domain_unregister()"); | ||
|
|
||
| data->pending_count--; | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm not sure that you should do this unconditionally. What if we're unregistering a domain that isn't pending? Can this lead to a negative value for pending_count?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. In theory this shouldn't happen since
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Still need to stress test and if this does happen then we have a really big problem.
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yeah I feel like this very decrement can get it below zero, before I don't want this to be even possible (even if it takes a long while to actually happen -- see my recent submission which had a bug that took 3 years to manifest)
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Do you have a particular scenario in mind?
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Say, if you're somehow stopping two pipelines quickly which leads to unregistering things inbetween them. Say if it's playback and capture (which work with different scheduling component). The following order may cause issues:
It's pretty hard to actually have this happen, I might say, but having "pending" not equate to an actual pending pipeline walk simply doesn't sit well with me (the variable name hints at one thing while it technically isn't that thing). Edit: Or is "pending_count" separate between scheduling components, and thus more of a thing to help order things? That would make the scenario impossible, in which case my complaint becomes one of naming.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Yep, pretty much. The purpose of
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Okay then my only remaining complaint is that the name can send you down the wrong train of thought. I can accept it if nobody else agrees with me though.
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. maybe add an |
||
| data->usage_count--; | ||
|
|
||
| /* unregister the DMA IRQ only for PPL tasks marked as "registrable" | ||
| * | ||
| * this is done because, in case of mixer topologies there's | ||
|
|
@@ -491,3 +549,33 @@ static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain, | |
| k_sem_give(&dt->sem); | ||
| } | ||
| } | ||
|
|
||
| static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain, | ||
| struct task *task, struct comp_dev **comp) | ||
| { | ||
| struct zephyr_dma_domain_data *data; | ||
| struct pipeline_task *pipe_task; | ||
|
|
||
| pipe_task = pipeline_task_get(task); | ||
| data = pipe_task->priv; | ||
|
|
||
| /* note: there's no need to disable IRQs here as they already | ||
| * are inside zephyr_ll_run when this function is called. | ||
| * | ||
| * VERY IMPORTANT: if this function needs to be moved outside | ||
| * of the atomic area PLEASE make sure to disable IRQs before | ||
| * calling it or disable IRQs here. | ||
| */ | ||
| if (data->triggered) { | ||
| data->pending_count--; | ||
|
|
||
| if (!data->pending_count) { | ||
| data->triggered = false; | ||
| data->pending_count = data->usage_count; | ||
| } | ||
|
|
||
| return true; | ||
| } | ||
|
|
||
| return false; | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I wouldn't mention the Zephyr DMA domain here. Others might also use this in the future. This is common enough inside structures, so a generic comment like
`private data for internal housekeeping` would be enough.

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
can't you just add your
`ctrl_data` pointer to `struct zephyr_ll_pdata`, stored in `task.priv_data`?