From 7267d55e379aca0ac0f0ba270c1f5e01476bdc4e Mon Sep 17 00:00:00 2001
From: Gregory Nutt
Date: Thu, 27 Feb 2014 11:16:15 -0600
Subject: Fix how CPU load counts are adjusted so that the total always adds
 up to 100%

---
 nuttx/ChangeLog             |  5 ++++-
 nuttx/sched/sched_cpuload.c | 11 ++++++++---
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/nuttx/ChangeLog b/nuttx/ChangeLog
index 96f760b1f..3dc953339 100644
--- a/nuttx/ChangeLog
+++ b/nuttx/ChangeLog
@@ -6673,4 +6673,7 @@
 	* arch/arm/src/sam34:  The port to the SAM4E is code complete (2014-2-16).
 	* include/cxx:  Fix some bad idempotence definitions in header files
 	  (2014-2-27).
-
+	* sched/sched_cpuload.c:  Change calculation of the total count when the
+	  time constant related delay has elapsed.  The total count is now always
+	  guaranteed to add up to 100% (excepting only truncation errors)
+	  (2014-2-27).

diff --git a/nuttx/sched/sched_cpuload.c b/nuttx/sched/sched_cpuload.c
index 802977f87..0f5495750 100644
--- a/nuttx/sched/sched_cpuload.c
+++ b/nuttx/sched/sched_cpuload.c
@@ -121,16 +121,21 @@ void weak_function sched_process_cpuload(void)
 
   if (++g_cpuload_total > (CONFIG_SCHED_CPULOAD_TIMECONSTANT * CLOCKS_PER_SEC))
     {
-      /* Divide the tick count for every task by two */
+      uint32_t total = 0;
+
+      /* Divide the tick count for every task by two and recalculate the
+       * total.
+       */
 
       for (i = 0; i < CONFIG_MAX_TASKS; i++)
         {
           g_pidhash[i].ticks >>= 1;
+          total += g_pidhash[i].ticks;
         }
 
-      /* Divide the total tick count by two */
+      /* Save the new total. */
 
-      g_cpuload_total >>= 1;
+      g_cpuload_total = total;
     }
 }
 
-- 
cgit v1.2.3
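
To make the effect of the change concrete, here is a small, self-contained sketch; it is not part of the NuttX sources, and the names NTASKS, task_ticks, total_ticks, and decay_cpuload() are invented for illustration. It mirrors the corrected decay step, halving every per-task tick counter and rebuilding the total from the halved values, then prints per-task shares that add up to roughly 100% apart from integer truncation.

/* Standalone illustration of the decay step as the patch rewrites it.
 * NTASKS, task_ticks, total_ticks, and decay_cpuload() are hypothetical
 * names chosen for this sketch, not NuttX identifiers.
 */

#include <stdint.h>
#include <stdio.h>

#define NTASKS 4

static uint32_t task_ticks[NTASKS] = { 301, 799, 1500, 3400 };
static uint32_t total_ticks;

/* Halve each task's tick count and recompute the total from the halved
 * values, as the corrected sched_process_cpuload() logic does.
 */

static void decay_cpuload(void)
{
  uint32_t total = 0;
  int i;

  for (i = 0; i < NTASKS; i++)
    {
      task_ticks[i] >>= 1;
      total += task_ticks[i];
    }

  /* Save the new total rather than halving the old total separately */

  total_ticks = total;
}

int main(void)
{
  int i;

  decay_cpuload();

  for (i = 0; i < NTASKS; i++)
    {
      /* Per-task share with one decimal place, as a consumer of the load
       * data might compute it.
       */

      uint32_t permille = 1000 * task_ticks[i] / total_ticks;
      printf("task %d: %lu.%lu%%\n", i,
             (unsigned long)(permille / 10),
             (unsigned long)(permille % 10));
    }

  return 0;
}

The design point behind the patch: halving the total independently (the previous logic) can leave it larger than the sum of the halved per-task counters, because each right shift discards a low-order bit per task, so shares computed as ticks/total could sum to less than 100% and the gap could grow over successive decay intervals. Rebuilding the total from the halved counters keeps the shares summing to 100%, subject only to truncation.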