forked from zephyrproject-rtos/zephyr
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathwork_q.c
146 lines (118 loc) · 3.09 KB
/
work_q.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
/*
* Copyright (c) 2016 Intel Corporation
* Copyright (c) 2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
*
* Workqueue support functions
*/
#include <kernel_structs.h>
#include <wait_q.h>
#include <spinlock.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/check.h>
#define WORKQUEUE_THREAD_NAME "workqueue"
#ifdef CONFIG_SYS_CLOCK_EXISTS
static struct k_spinlock lock;
#endif
extern void z_work_q_main(void *work_q_ptr, void *p2, void *p3);
void k_work_q_start(struct k_work_q *work_q, k_thread_stack_t *stack,
		    size_t stack_size, int prio)
{
	struct k_thread *thread = &work_q->thread;

	/* The queue backs the FIFO of submitted work items that the
	 * dedicated thread (z_work_q_main) drains.
	 */
	k_queue_init(&work_q->queue);

	(void)k_thread_create(thread, stack, stack_size, z_work_q_main,
			      work_q, NULL, NULL, prio, 0, K_NO_WAIT);

	k_thread_name_set(thread, WORKQUEUE_THREAD_NAME);
}
#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout expiry hook: recover the enclosing delayed work item from its
 * embedded timeout and hand it over to the workqueue it was bound to at
 * submission time.
 */
static void work_timeout(struct _timeout *t)
{
	struct k_delayed_work *dwork =
		CONTAINER_OF(t, struct k_delayed_work, timeout);

	k_work_submit_to_queue(dwork->work_q, &dwork->work);
}
void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
{
	/* Start detached: no workqueue is bound until the item is
	 * actually submitted.
	 */
	work->work_q = NULL;

	k_work_init(&work->work, handler);
	z_init_timeout(&work->timeout);
}
/* Cancel a delayed work item; caller must hold the module spinlock.
 *
 * Returns 0 on success, -EALREADY if the item is not bound to a queue or
 * its timeout already fired and completed, -EINVAL if the item was
 * submitted but could not be pulled back off the queue.
 */
static int work_cancel(struct k_delayed_work *work)
{
	CHECKIF(work->work_q == NULL) {
		return -EALREADY;
	}

	if (!k_work_pending(&work->work)) {
		/* Still counting down: stop the pending timeout. */
		if (z_abort_timeout(&work->timeout) != 0) {
			return -EALREADY;
		}
	} else if (!k_queue_remove(&work->work_q->queue, &work->work)) {
		/* Already handed to the queue but removal failed. */
		return -EINVAL;
	}

	/* Detach from workqueue */
	work->work_q = NULL;
	atomic_clear_bit(work->work.flags, K_WORK_STATE_PENDING);

	return 0;
}
/**
 * @brief Submit a delayed work item to a workqueue after @a delay.
 *
 * Rebinds @a work to @a work_q and schedules it. If the item is already
 * pending on the same queue it is cancelled first (resubmission restarts
 * the delay). A K_NO_WAIT delay bypasses the timeout machinery and
 * submits immediately.
 *
 * @param work_q Workqueue to run the item on.
 * @param work   Delayed work item; must have been initialized with
 *               k_delayed_work_init().
 * @param delay  Delay before submission, or K_NO_WAIT for immediate.
 *
 * @return 0 on success, -EADDRINUSE if @a work is active on a different
 *         queue, or a negative error from work_cancel().
 */
int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   k_timeout_t delay)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int err = 0;

	/* Work cannot be active in multiple queues */
	if (work->work_q != NULL && work->work_q != work_q) {
		err = -EADDRINUSE;
		goto done;
	}

	/* Cancel if work has been submitted */
	if (work->work_q == work_q) {
		err = work_cancel(work);
		/* -EALREADY indicates the work has already completed so this
		 * is likely a recurring work.
		 */
		if (err == -EALREADY) {
			err = 0;
		} else if (err < 0) {
			goto done;
		}
	}

	/* Attach workqueue so the timeout callback can submit it */
	work->work_q = work_q;

	/* Submit work directly if no delay. Note that this is a
	 * blocking operation, so release the lock first.
	 */
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);
		k_work_submit_to_queue(work_q, &work->work);
		return 0;
	}

#ifdef CONFIG_LEGACY_TIMEOUT_API
	/* Legacy API expresses delays in ms; convert to aligned ticks. */
	delay = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
#endif

	/* Add timeout; work_timeout() will submit the item on expiry. */
	z_add_timeout(&work->timeout, work_timeout, delay);

done:
	k_spin_unlock(&lock, key);
	return err;
}
int k_delayed_work_cancel(struct k_delayed_work *work)
{
if (!work->work_q) {
return -EINVAL;
}
k_spinlock_key_t key = k_spin_lock(&lock);
int ret = work_cancel(work);
k_spin_unlock(&lock, key);
return ret;
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */