Skip to content

Commit

Permalink
kernel: logging: convert K_DEBUG to LOG_DBG
Browse the repository at this point in the history
Move K_DEBUG to use LOG_DBG instead of plain printk.

Signed-off-by: Anas Nashif <[email protected]>
nashif authored and MaureenHelm committed Jun 25, 2020
1 parent e71e803 commit 2c5d404
Show file tree
Hide file tree
Showing 6 changed files with 14 additions and 28 deletions.
6 changes: 0 additions & 6 deletions include/kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,6 @@ extern "C" {
* @}
*/

#ifdef CONFIG_KERNEL_DEBUG
/* Kernel-internal debug trace: prefixes the message with the enclosing
 * function's name (via __func__) and forwards everything to printk().
 * The ##__VA_ARGS__ GNU extension swallows the trailing comma when no
 * variadic arguments are given.
 */
#define K_DEBUG(fmt, ...) printk("[%s] " fmt, __func__, ##__VA_ARGS__)
#else
/* Compiles away entirely when CONFIG_KERNEL_DEBUG is disabled. */
#define K_DEBUG(fmt, ...)
#endif

#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES)
#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
Expand Down
8 changes: 0 additions & 8 deletions kernel/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -297,14 +297,6 @@ config INIT_STACKS
water mark can be easily determined. This applies to the stack areas
for threads, as well as to the interrupt stack.

config KERNEL_DEBUG
bool "Kernel debugging"
select INIT_STACKS
help
Enable kernel debugging.

Note that debugging the kernel internals can be very verbose.

config BOOT_BANNER
bool "Boot banner"
default y
Expand Down
2 changes: 0 additions & 2 deletions kernel/include/ksched.h
Original file line number Diff line number Diff line change
Expand Up @@ -271,8 +271,6 @@ static inline void z_sched_lock(void)

compiler_barrier();

K_DEBUG("scheduler locked (%p:%d)\n",
_current, _current->base.sched_locked);
#endif
}

Expand Down
19 changes: 10 additions & 9 deletions kernel/mutex.c
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@
#include <syscall_handler.h>
#include <tracing/tracing.h>
#include <sys/check.h>
LOG_MODULE_DECLARE(os);

/* We use a global spinlock here because some of the synchronization
* is protecting things like owner thread priorities which aren't
Expand Down Expand Up @@ -106,7 +107,7 @@ static bool adjust_owner_prio(struct k_mutex *mutex, int32_t new_prio)
{
if (mutex->owner->base.prio != new_prio) {

K_DEBUG("%p (ready (y/n): %c) prio changed to %d (was %d)\n",
LOG_DBG("%p (ready (y/n): %c) prio changed to %d (was %d)",
mutex->owner, z_is_thread_ready(mutex->owner) ?
'y' : 'n',
new_prio, mutex->owner->base.prio);
Expand Down Expand Up @@ -136,7 +137,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
mutex->lock_count++;
mutex->owner = _current;

K_DEBUG("%p took mutex %p, count: %d, orig prio: %d\n",
LOG_DBG("%p took mutex %p, count: %d, orig prio: %d",
_current, mutex, mutex->lock_count,
mutex->owner_orig_prio);

Expand All @@ -155,17 +156,17 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
new_prio = new_prio_for_inheritance(_current->base.prio,
mutex->owner->base.prio);

K_DEBUG("adjusting prio up on mutex %p\n", mutex);
LOG_DBG("adjusting prio up on mutex %p", mutex);

if (z_is_prio_higher(new_prio, mutex->owner->base.prio)) {
resched = adjust_owner_prio(mutex, new_prio);
}

int got_mutex = z_pend_curr(&lock, key, &mutex->wait_q, timeout);

K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);
LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex);

K_DEBUG("%p got mutex %p (y/n): %c\n", _current, mutex,
LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex,
got_mutex ? 'y' : 'n');

if (got_mutex == 0) {
Expand All @@ -175,7 +176,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)

/* timed out */

K_DEBUG("%p timeout on mutex %p\n", _current, mutex);
LOG_DBG("%p timeout on mutex %p", _current, mutex);

key = k_spin_lock(&lock);

Expand All @@ -185,7 +186,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
new_prio_for_inheritance(waiter->base.prio, mutex->owner_orig_prio) :
mutex->owner_orig_prio;

K_DEBUG("adjusting prio down on mutex %p\n", mutex);
LOG_DBG("adjusting prio down on mutex %p", mutex);

resched = adjust_owner_prio(mutex, new_prio) || resched;

Expand Down Expand Up @@ -236,7 +237,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)
sys_trace_void(SYS_TRACE_ID_MUTEX_UNLOCK);
z_sched_lock();

K_DEBUG("mutex %p lock_count: %d\n", mutex, mutex->lock_count);
LOG_DBG("mutex %p lock_count: %d", mutex, mutex->lock_count);

/*
* If we are the owner and count is greater than 1, then decrement
Expand All @@ -256,7 +257,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)

mutex->owner = new_owner;

K_DEBUG("new owner of mutex %p: %p (prio: %d)\n",
LOG_DBG("new owner of mutex %p: %p (prio: %d)",
mutex, new_owner, new_owner ? new_owner->base.prio : -1000);

if (new_owner != NULL) {
Expand Down
6 changes: 4 additions & 2 deletions kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
#include <drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os);

/* Maximum time between the time a self-aborting thread flags itself
* DEAD and the last read or write to its stack memory (i.e. the time
Expand Down Expand Up @@ -782,7 +784,7 @@ void k_sched_unlock(void)
update_cache(0);
}

K_DEBUG("scheduler unlocked (%p:%d)\n",
LOG_DBG("scheduler unlocked (%p:%d)",
_current, _current->base.sched_locked);

z_reschedule_unlocked();
Expand Down Expand Up @@ -1149,7 +1151,7 @@ static int32_t z_tick_sleep(int32_t ticks)

__ASSERT(!arch_is_in_isr(), "");

K_DEBUG("thread %p for %d ticks\n", _current, ticks);
LOG_DBG("thread %p for %d ticks", _current, ticks);

/* wait of 0 ms is treated as a 'yield' */
if (ticks == 0) {
Expand Down
1 change: 0 additions & 1 deletion tests/misc/test_build/debug.conf
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
CONFIG_TEST=y
CONFIG_DEBUG=y
CONFIG_STDOUT_CONSOLE=y
CONFIG_KERNEL_DEBUG=y
CONFIG_ASSERT=y

0 comments on commit 2c5d404

Please sign in to comment.