Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions src/include/sof/audio/pipeline.h
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,10 @@ struct pipeline_task {
struct task task; /**< parent structure */
bool registrable; /**< should task be registered on irq */
struct comp_dev *sched_comp; /**< pipeline scheduling component */
/* private data used by Zephyr DMA domain for internal
* housekeeping - don't touch.
*/
void *priv;
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wouldn't mention about Zephyr DMA domain here. Other's might use this also in the future. This is common enough inside structures so a generic comment like private data for internal housekeeping would be enough.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

cannot you just add your ctrl_data pointer to struct zephyr_ll_pdata stored in task.priv_data?

};

#define pipeline_task_get(t) container_of(t, struct pipeline_task, task)
Expand Down
11 changes: 4 additions & 7 deletions src/include/sof/schedule/ll_schedule_domain.h
Original file line number Diff line number Diff line change
Expand Up @@ -195,13 +195,10 @@ static inline void domain_disable(struct ll_schedule_domain *domain, int core)
/* Ask the domain whether @task may be scheduled on this tick.
 *
 * domain_is_pending is an optional op: domains that don't implement
 * it treat every registered task as always pending, so scheduling
 * proceeds unconditionally (the pre-existing behavior).
 *
 * @comp, when non-NULL, receives the component the domain considers
 * responsible for the pending state (op-specific).
 */
static inline bool domain_is_pending(struct ll_schedule_domain *domain,
				     struct task *task, struct comp_dev **comp)
{
	if (domain->ops->domain_is_pending)
		return domain->ops->domain_is_pending(domain, task, comp);

	return true;
}


Expand Down
126 changes: 107 additions & 19 deletions src/schedule/zephyr_dma_domain.c
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,31 @@ struct zephyr_dma_domain_data {
struct dma_chan_data *channel; /* channel data */
bool enabled_irq; /* true if DMA IRQ was already enabled */
struct zephyr_dma_domain_thread *dt;
/* set to true by the DMA IRQ handler when an IRQ for
* associated channel was triggered.
*/
bool triggered;
/* number of pipeline tasks sharing
* this data structure.
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is it the same as just "number of tasks, sharing the same data structure?"

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yep.

*/
int usage_count;
/* number of times data->triggered
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can we similarly simplify this and remove "used to keep track of?"

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sure

* can be left set to true before resetting it to false.
*
* this is needed for mixer topologies because the
* non-registrable pipeline tasks need to be executed
* when an IRQ comes for the registrable pipeline task.
*/
int pending_count;
/* used when linking a data structure to a non-registrable
* pipeline task.
*
* in the case of mixer, the non-registrable pipeline tasks
* have the same scheduling component as the registrable
* pipeline task so this can be used as a criteria to attach
* the data structure to the non-registrable pipeline task.
*/
struct comp_dev *sched_comp;
};

struct zephyr_dma_domain_thread {
Expand Down Expand Up @@ -80,11 +105,14 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain,
uint32_t num_tasks);
static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain,
struct task *task);
static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain,
struct task *task, struct comp_dev **comp);

static const struct ll_schedule_domain_ops zephyr_dma_domain_ops = {
.domain_register = zephyr_dma_domain_register,
.domain_unregister = zephyr_dma_domain_unregister,
.domain_task_cancel = zephyr_dma_domain_task_cancel
.domain_task_cancel = zephyr_dma_domain_task_cancel,
.domain_is_pending = zephyr_dma_domain_is_pending,
};

struct ll_schedule_domain *zephyr_dma_domain_init(struct dma *dma_array,
Expand Down Expand Up @@ -143,13 +171,21 @@ static void dma_irq_handler(void *data)
irq = zephyr_dma_domain_data->irq;
dt = zephyr_dma_domain_data->dt;

/* clear IRQ */
dma_interrupt_legacy(channel, DMA_IRQ_CLEAR);
interrupt_clear_mask(irq, BIT(channel_index));
/* was the IRQ triggered by this channel? */
if (dma_interrupt_legacy(channel, DMA_IRQ_STATUS_GET)) {
/* IRQ is for this channel. Make sure that the scheduler
* can schedule the associated pipeline task.
*/
zephyr_dma_domain_data->triggered = true;

/* give resources to thread semaphore */
if (dt->handler)
k_sem_give(sem);
/* clear IRQ */
dma_interrupt_legacy(channel, DMA_IRQ_CLEAR);
interrupt_clear_mask(irq, BIT(channel_index));

/* give resources to thread semaphore */
if (dt->handler)
k_sem_give(sem);
}
}

static int enable_dma_irq(struct zephyr_dma_domain_data *data)
Expand Down Expand Up @@ -204,6 +240,24 @@ static int register_dma_irq(struct zephyr_dma_domain *domain,
if (core != crt_chan->core)
continue;

if ((crt_data->sched_comp == pipe_task->sched_comp ||
!crt_data->sched_comp) && !pipe_task->registrable) {
/* attach crt_data to current pipeline
* task only if there's a match between
* the scheduling components or if
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

"if"

* crt_data's sched_comp field wasn't
* set yet.
*/
pipe_task->priv = crt_data;

crt_data->usage_count++;
crt_data->pending_count++;

return 0;
} else if (!pipe_task->registrable) {
return 0;
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can we do this:

if (!pipe_task->registrable) {
	if (crt_data->sched_comp && crt_data->sched_comp != pipe_task->sched_comp)
		return 0;

	/* attach crt_data to current pipeline
	...
}

}

/* skip if IRQ was already registered */
if (crt_data->enabled_irq)
continue;
Expand All @@ -215,6 +269,11 @@ static int register_dma_irq(struct zephyr_dma_domain *domain,
crt_data->irq = irq;
crt_data->channel = crt_chan;
crt_data->dt = dt;
crt_data->sched_comp = pipe_task->sched_comp;
crt_data->usage_count++;
crt_data->pending_count++;

pipe_task->priv = crt_data;

if (dt->started) {
/* if the Zephyr thread was started, we
Expand Down Expand Up @@ -261,16 +320,6 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain,

tr_info(&ll_tr, "zephyr_dma_domain_register()");

/* don't even bother trying to register DMA IRQ for
* non-registrable tasks.
*
* this is needed because zephyr_dma_domain_register() will
* return -EINVAL for non-registrable tasks because of
* register_dma_irq() which is not right.
*/
if (!pipe_task->registrable)
return 0;

/* the DMA IRQ has to be registered before the Zephyr thread is
* started.
*
Expand All @@ -285,12 +334,16 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain,

if (ret < 0) {
tr_err(&ll_tr,
"failed to register DMA IRQ for pipe task %p on core %d",
pipe_task, core);
"failed to register DMA IRQ for pipe task %p on core %d reg: %d",
pipe_task, core, pipe_task->registrable);

return ret;
}

/* non-registrable tasks only needed the DMA IRQ bookkeeping done
* above by register_dma_irq(); the Zephyr scheduling thread below
* is started only for the registrable task.
*/
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this comment doesn't add any information - it simply restates the next statement. If a comment is needed here, it should explain something that isn't obvious from the code

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You're right. Will remove or rework it.

if (!pipe_task->registrable)
return 0;

/* skip if Zephyr thread was already started on this core */
if (dt->handler)
return 0;
Expand Down Expand Up @@ -429,15 +482,20 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain,
struct zephyr_dma_domain *zephyr_dma_domain;
struct zephyr_dma_domain_thread *dt;
struct pipeline_task *pipe_task;
struct zephyr_dma_domain_data *data;
int ret, core;

zephyr_dma_domain = ll_sch_get_pdata(domain);
pipe_task = pipeline_task_get(task);
core = cpu_get_id();
dt = zephyr_dma_domain->domain_thread + core;
data = pipe_task->priv;

tr_info(&ll_tr, "zephyr_dma_domain_unregister()");

data->pending_count--;
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure that you should do this unconditionally. What if we're unregistering a domain that isn't pending? Can this lead to a negative value for pending_count?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In theory this shouldn't happen since domain_is_pending() always resets its value when 0. Since cancelling a task is synchronous that means domain_is_pending() will always be called.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Still need to stress test and if this does happen then we have a really big problem.

Copy link
Copy Markdown
Collaborator

@paulstelian97 paulstelian97 May 23, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah I feel like this very decrement can get it below zero, before domain_is_pending() has a chance to reset it.

I don't want this to be even possible (even if it takes a long while to actually happen -- see my recent submission which had a bug that took 3 years to manifest)

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do you have a particular scenario in mind?

Copy link
Copy Markdown
Collaborator

@paulstelian97 paulstelian97 May 23, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Say, if you're somehow stopping two pipelines quickly which leads to unregistering things inbetween them. Say if it's playback and capture (which work with different scheduling component). The following order may cause issues:

  1. IRQ arrives for playback; it is handled, pending = 1
  2. Playback pipeline is torn down, pending = 0
  3. Capture pipeline is torn down before IRQ ever arrives for capture this period, pending = -1

It's pretty hard to actually have this happen I might say, but having "pending" not equate an actual pipeline walk pending simply doesn't sit well with me (variable name hinting one thing while it technically isn't that thing)

Edit: Or is "pending_count" separate between scheduling components, and thus more of a thing to help order things? That would make the scenario impossible, in which case my complaint becomes one of naming.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Edit: Or is "pending_count" separate between scheduling components, and thus more of a thing to help order things? That would make the scenario impossible, in which case my complaint becomes one of naming.

Yep, pretty much. The purpose of pending_count is to allow non-registrable pipeline tasks to also be executed since, in the case of mixers, the only task that would be executed without it would be the registrable one. In the case of registrable pipeline tasks (like in your case) this value isn't shared. We can also consider that we have a different pending_count for each scheduling component like you said.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay then my only remaining complaint is that the name can send you down the wrong train of thought. I can accept it if nobody else agrees with me though.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

maybe add an assert() at least

data->usage_count--;

/* unregister the DMA IRQ only for PPL tasks marked as "registrable"
*
* this is done because, in case of mixer topologies there's
Expand Down Expand Up @@ -491,3 +549,33 @@ static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain,
k_sem_give(&dt->sem);
}
}

/* Report whether @task's DMA channel raised an IRQ since the last
 * pipeline walk.
 *
 * Returns true when the channel data linked to the task via
 * pipe_task->priv was marked triggered by dma_irq_handler(). Each of
 * the usage_count tasks sharing that data consumes one pending slot;
 * when the last one has run, triggered is cleared and pending_count
 * is re-armed for the next DMA IRQ.
 */
static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain,
					 struct task *task, struct comp_dev **comp)
{
	struct zephyr_dma_domain_data *data;
	struct pipeline_task *pipe_task;

	pipe_task = pipeline_task_get(task);
	data = pipe_task->priv;

	/* register_dma_irq() can return success without attaching any
	 * channel data to a non-registrable task (when no sched_comp
	 * match was found), so priv may legitimately be NULL here.
	 * Such a task has no DMA IRQ to wait for; treat it as not
	 * pending rather than dereferencing NULL.
	 *
	 * NOTE(review): confirm "never pending" is the intended policy
	 * for unmatched non-registrable tasks.
	 */
	if (!data)
		return false;

	/* note: there's no need to disable IRQs here as they already
	 * are inside zephyr_ll_run when this function is called.
	 *
	 * VERY IMPORTANT: if this function needs to be moved outside
	 * of the atomic area PLEASE make sure to disable IRQs before
	 * calling it or disable IRQs here.
	 */
	if (data->triggered) {
		/* pending_count must never underflow: unregister paths
		 * decrement it as well, so catch imbalance early.
		 */
		assert(data->pending_count > 0);

		data->pending_count--;

		if (!data->pending_count) {
			data->triggered = false;
			data->pending_count = data->usage_count;
		}

		return true;
	}

	return false;
}
4 changes: 4 additions & 0 deletions src/schedule/zephyr_ll.c
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,10 @@ static void zephyr_ll_run(void *data)
list_item_del(list);
list_item_append(list, &task_head);

/* check if the task can be scheduled */
if (!domain_is_pending(sch->ll_domain, task, NULL))
continue;

zephyr_ll_unlock(sch, &flags);

/*
Expand Down