
Commit b8fb353

nordic-krch authored and carlescufi committed
kernel: Move k_busy_wait from thread to timeout
k_busy_wait() is the only function from thread.c that is used when CONFIG_MULTITHREADING=n. Move it to timeout.c, where it fits better, since it requires the system clock to be present.

Signed-off-by: Krzysztof Chruscinski <[email protected]>
1 parent 09ba258 · commit b8fb353
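For context, k_busy_wait() spins the CPU for the requested number of microseconds instead of sleeping, which is why it is still needed when CONFIG_MULTITHREADING=n and no scheduler is available. A minimal, hypothetical caller as a sketch (the settling delay and the function name sensor_power_up() are illustrative, not part of this commit):

#include <zephyr.h>	/* <zephyr/kernel.h> on newer trees */

/* Hypothetical helper: give a peripheral time to settle after power-up.
 * k_busy_wait() spins rather than yielding, so it also works before the
 * scheduler starts or in single-threaded (CONFIG_MULTITHREADING=n) builds.
 */
void sensor_power_up(void)
{
	/* ...drive the (hypothetical) enable line here... */

	k_busy_wait(150);	/* spin for 150 microseconds */
}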

File tree: kernel/thread.c, kernel/timeout.c

2 files changed, +37 -38 lines


kernel/thread.c (-38 lines)

@@ -120,44 +120,6 @@ bool z_is_thread_essential(void)
 	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
 }
 
-#ifdef CONFIG_SYS_CLOCK_EXISTS
-void z_impl_k_busy_wait(uint32_t usec_to_wait)
-{
-	if (usec_to_wait == 0U) {
-		return;
-	}
-
-#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
-	uint32_t start_cycles = k_cycle_get_32();
-
-	/* use 64-bit math to prevent overflow when multiplying */
-	uint32_t cycles_to_wait = (uint32_t)(
-		(uint64_t)usec_to_wait *
-		(uint64_t)sys_clock_hw_cycles_per_sec() /
-		(uint64_t)USEC_PER_SEC
-	);
-
-	for (;;) {
-		uint32_t current_cycles = k_cycle_get_32();
-
-		/* this handles the rollover on an unsigned 32-bit value */
-		if ((current_cycles - start_cycles) >= cycles_to_wait) {
-			break;
-		}
-	}
-#else
-	arch_busy_wait(usec_to_wait);
-#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
-}
-
-#ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
-{
-	z_impl_k_busy_wait(usec_to_wait);
-}
-#include <syscalls/k_busy_wait_mrsh.c>
-#endif /* CONFIG_USERSPACE */
-#endif /* CONFIG_SYS_CLOCK_EXISTS */
 
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 void z_impl_k_thread_custom_data_set(void *value)
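The conversion removed here multiplies microseconds by the cycle rate before dividing, and the in-code comment notes that the multiply is done in 64-bit math to avoid overflow. A standalone sketch of why that matters, using hypothetical numbers (a 16 MHz cycle counter and a 5000 us wait, neither taken from this commit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t usec_to_wait = 5000U;		/* hypothetical wait */
	uint32_t cycles_per_sec = 16000000U;	/* hypothetical 16 MHz counter */

	/* 5000 * 16000000 = 80,000,000,000, which wraps modulo 2^32 when the
	 * multiply is done in 32 bits, leaving a bogus cycle count.
	 */
	uint32_t wrong = (usec_to_wait * cycles_per_sec) / 1000000U;

	/* Widening to 64 bits first keeps the intermediate product intact;
	 * only the final result has to fit in 32 bits.
	 */
	uint32_t right = (uint32_t)((uint64_t)usec_to_wait * cycles_per_sec /
				    (uint64_t)1000000U);

	printf("32-bit multiply: %u cycles, 64-bit multiply: %u cycles\n",
	       wrong, right);	/* prints 2690 vs 80000 */
	return 0;
}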

kernel/timeout.c (+37 lines)

@@ -297,6 +297,43 @@ static inline int64_t z_vrfy_k_uptime_ticks(void)
 #include <syscalls/k_uptime_ticks_mrsh.c>
 #endif
 
+void z_impl_k_busy_wait(uint32_t usec_to_wait)
+{
+	if (usec_to_wait == 0U) {
+		return;
+	}
+
+#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
+	uint32_t start_cycles = k_cycle_get_32();
+
+	/* use 64-bit math to prevent overflow when multiplying */
+	uint32_t cycles_to_wait = (uint32_t)(
+		(uint64_t)usec_to_wait *
+		(uint64_t)sys_clock_hw_cycles_per_sec() /
+		(uint64_t)USEC_PER_SEC
+	);
+
+	for (;;) {
+		uint32_t current_cycles = k_cycle_get_32();
+
+		/* this handles the rollover on an unsigned 32-bit value */
+		if ((current_cycles - start_cycles) >= cycles_to_wait) {
+			break;
+		}
+	}
+#else
+	arch_busy_wait(usec_to_wait);
+#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
+}
+
+#ifdef CONFIG_USERSPACE
+static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
+{
+	z_impl_k_busy_wait(usec_to_wait);
+}
+#include <syscalls/k_busy_wait_mrsh.c>
+#endif /* CONFIG_USERSPACE */
+
 /* Returns the uptime expiration (relative to an unlocked "now"!) of a
  * timeout object. When used correctly, this should be called once,
  * synchronously with the user passing a new timeout value. It should
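The busy-wait loop added above exits by comparing the elapsed cycle delta, not an absolute end time, and the in-code comment says this handles rollover of the unsigned 32-bit cycle counter. A minimal standalone sketch of that property, with hypothetical counter values (not taken from the commit):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	/* Start sampling just before the 32-bit cycle counter wraps... */
	uint32_t start_cycles = 0xFFFFFF00U;

	/* ...and read it again after it has rolled over to a small value. */
	uint32_t current_cycles = 0x00000100U;

	/* Unsigned subtraction wraps modulo 2^32, so the difference is the
	 * true elapsed count (0x200 cycles), not a huge bogus number.
	 */
	assert(current_cycles - start_cycles == 0x200U);

	/* Comparing this delta against cycles_to_wait is what lets the loop
	 * in z_impl_k_busy_wait() survive counter wraparound.
	 */
	return 0;
}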
