mirror of https://github.com/betaflight/betaflight.git

Add option for scheduling policy targeting constant average task rates

Thorsten Laux 2019-01-11 13:29:25 +01:00 committed by mikeller
parent 2bf2ded102
commit 33741dce75
8 changed files with 52 additions and 4 deletions
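The change introduces a second scheduling policy. Under the existing SCHEDULER_POLICY_PRIORITIZE_PERIOD, the next execution time of a task is computed from when it last actually ran, so every late run pushes the whole schedule back; under the new SCHEDULER_POLICY_PRIORITIZE_AVERAGE_RATE, it is computed from when the task should last have run, so the long-term average rate stays at 1 / desiredPeriod. A minimal standalone sketch of the distinction (plain C for illustration; only the field names mirror the diff below, the helpers themselves are not firmware code):

#include <stdint.h>

typedef uint32_t timeUs_t;

typedef struct {
    timeUs_t lastExecutedAt;  // when the task last actually ran
    timeUs_t lastDesiredAt;   // when the task last should have run
    timeUs_t desiredPeriod;   // nominal period in microseconds
} sketchTask_t;

// PRIORITIZE_PERIOD: a late execution delays every following execution.
static timeUs_t nextRunPrioritizePeriod(const sketchTask_t *t)
{
    return t->lastExecutedAt + t->desiredPeriod;
}

// PRIORITIZE_AVERAGE_RATE: the schedule stays anchored to the ideal grid,
// so jitter in one cycle is not carried into later cycles.
static timeUs_t nextRunPrioritizeAverageRate(const sketchTask_t *t)
{
    return t->lastDesiredAt + t->desiredPeriod;
}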

View file

@@ -62,6 +62,8 @@
#include "sensors/battery.h"
#include "sensors/gyro.h"
#include "scheduler/scheduler.h"
pidProfile_t *currentPidProfile;
#ifndef RX_SPI_DEFAULT_PROTOCOL
@@ -88,6 +90,7 @@ PG_RESET_TEMPLATE(systemConfig_t, systemConfig,
.boardIdentifier = TARGET_BOARD_IDENTIFIER,
.hseMhz = SYSTEM_HSE_VALUE, // Not used for non-F4 targets
.configured = false,
.schedulerPolicy = SCHEDULER_POLICY_PRIORITIZE_PERIOD,
);
uint8_t getCurrentPidProfileIndex(void)
@@ -121,6 +124,7 @@ void resetConfigs(void)
static void activateConfig(void)
{
schedulerSetPolicy(systemConfig()->schedulerPolicy);
loadPidProfile();
loadControlRateProfile();

View file

@@ -33,6 +33,11 @@ typedef struct pilotConfig_s {
PG_DECLARE(pilotConfig_t, pilotConfig);
typedef enum {
SCHEDULER_POLICY_PRIORITIZE_PERIOD,
SCHEDULER_POLICY_PRIORITIZE_AVERAGE_RATE,
} schedulerPolicy_e;
typedef struct systemConfig_s {
uint8_t pidProfileIndex;
uint8_t activeRateProfile;
@@ -44,6 +49,7 @@ typedef struct systemConfig_s {
char boardIdentifier[sizeof(TARGET_BOARD_IDENTIFIER) + 1];
uint8_t hseMhz; // Not used for non-F4 targets
uint8_t configured;
uint8_t schedulerPolicy;
} systemConfig_t;
PG_DECLARE(systemConfig_t, systemConfig);

View file

@@ -3766,7 +3766,7 @@ static void cliTasks(char *cmdline)
int taskFrequency;
int subTaskFrequency = 0;
if (taskId == TASK_GYROPID) {
subTaskFrequency = taskInfo.latestDeltaTime == 0 ? 0 : (int)(1000000.0f / ((float)taskInfo.latestDeltaTime));
subTaskFrequency = taskInfo.movingAverageCycleTime == 0.0f ? 0.0f : (int)(1000000.0f / (taskInfo.movingAverageCycleTime));
taskFrequency = subTaskFrequency / pidConfig()->pid_process_denom;
if (pidConfig()->pid_process_denom > 1) {
cliPrintf("%02d - (%15s) ", taskId, taskInfo.taskName);

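The cliTasks change above switches the reported gyro/PID loop frequency from the single latest delta time to the smoothed movingAverageCycleTime, which keeps the displayed rate from jumping on one-off jitter. A small sketch of the conversion, assuming the cycle time is in microseconds (the helper name is illustrative, not firmware code):

// Convert a smoothed cycle time in microseconds to a frequency in Hz,
// guarding against division by zero exactly as the diff does.
static int cycleTimeUsToHz(float movingAverageCycleTimeUs)
{
    return movingAverageCycleTimeUs == 0.0f ? 0 : (int)(1000000.0f / movingAverageCycleTimeUs);
}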
View file

@@ -423,6 +423,11 @@ static const char * const lookupTableTpaMode[] = {
};
#endif
static const char* const lookupTableSchedulerPolicy[] = {
"PERIOD", "RATE"
};
#define LOOKUP_TABLE_ENTRY(name) { name, ARRAYLEN(name) }
const lookupTableEntry_t lookupTables[] = {
@@ -531,6 +536,7 @@ const lookupTableEntry_t lookupTables[] = {
#ifdef USE_TPA_MODE
LOOKUP_TABLE_ENTRY(lookupTableTpaMode),
#endif
LOOKUP_TABLE_ENTRY(lookupTableSchedulerPolicy)
};
#undef LOOKUP_TABLE_ENTRY
@@ -1175,6 +1181,7 @@ const clivalue_t valueTable[] = {
{ "cpu_overclock", VAR_UINT8 | MASTER_VALUE | MODE_LOOKUP, .config.lookup = { TABLE_OVERCLOCK }, PG_SYSTEM_CONFIG, offsetof(systemConfig_t, cpu_overclock) },
#endif
{ "pwr_on_arm_grace", VAR_UINT8 | MASTER_VALUE, .config.minmax = { 0, 30 }, PG_SYSTEM_CONFIG, offsetof(systemConfig_t, powerOnArmingGraceTime) },
{ "scheduler_policy", VAR_UINT8 | MASTER_VALUE | MODE_LOOKUP, .config.lookup = { TABLE_SCHEDULER_POLICY }, PG_SYSTEM_CONFIG, offsetof(systemConfig_t, schedulerPolicy) },
// PG_VTX_CONFIG
#ifdef USE_VTX_COMMON

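With the lookup table and value table entries above, the policy becomes a regular CLI setting. Assuming the standard Betaflight CLI flow, selecting the new policy would look roughly like:

set scheduler_policy = RATE
save

where PERIOD (the default) maps to SCHEDULER_POLICY_PRIORITIZE_PERIOD and RATE to SCHEDULER_POLICY_PRIORITIZE_AVERAGE_RATE.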
View file

@@ -131,6 +131,7 @@ typedef enum {
#ifdef USE_TPA_MODE
TABLE_TPA_MODE,
#endif
TABLE_SCHEDULER_POLICY,
LOOKUP_TABLE_COUNT
} lookupTableIndex_e;

View file

@@ -53,10 +53,12 @@ static FAST_RAM_ZERO_INIT uint32_t totalWaitingTasksSamples;
static FAST_RAM_ZERO_INIT bool calculateTaskStatistics;
FAST_RAM_ZERO_INIT uint16_t averageSystemLoadPercent = 0;
static FAST_RAM_ZERO_INIT int taskQueuePos = 0;
STATIC_UNIT_TESTED FAST_RAM_ZERO_INIT int taskQueueSize = 0;
static FAST_RAM_ZERO_INIT schedulerPolicy_e policy;
static FAST_RAM_ZERO_INIT int periodCalculationBasisOffset = offsetof(cfTask_t, lastExecutedAt);
// No need for a linked list for the queue, since items are only inserted at startup
STATIC_UNIT_TESTED FAST_RAM_ZERO_INIT cfTask_t* taskQueueArray[TASK_COUNT + 1]; // extra item for NULL pointer at end of queue
@@ -164,6 +166,7 @@ void getTaskInfo(cfTaskId_e taskId, cfTaskInfo_t * taskInfo)
taskInfo->totalExecutionTime = cfTasks[taskId].totalExecutionTime;
taskInfo->averageExecutionTime = cfTasks[taskId].movingSumExecutionTime / MOVING_SUM_COUNT;
taskInfo->latestDeltaTime = cfTasks[taskId].taskLatestDeltaTime;
taskInfo->movingAverageCycleTime = cfTasks[taskId].movingAverageCycleTime;
#endif
}
@@ -243,6 +246,22 @@ void schedulerInit(void)
queueAdd(&cfTasks[TASK_SYSTEM]);
}
void schedulerSetPolicy(schedulerPolicy_e newPolicy)
{
policy = newPolicy;
if (policy == SCHEDULER_POLICY_PRIORITIZE_AVERAGE_RATE) {
periodCalculationBasisOffset = offsetof(cfTask_t, lastDesiredAt);
} else
{
periodCalculationBasisOffset = offsetof(cfTask_t, lastExecutedAt);
}
}
inline static timeUs_t getPeriodCalculationBasis(const cfTask_t* task)
{
return *(timeUs_t*)((uint8_t*)task + periodCalculationBasisOffset);
}
FAST_CODE void scheduler(void)
{
// Cache currentTime
@@ -251,7 +270,7 @@ FAST_CODE void scheduler(void)
// Check for realtime tasks
bool outsideRealtimeGuardInterval = true;
for (const cfTask_t *task = queueFirst(); task != NULL && task->staticPriority >= TASK_PRIORITY_REALTIME; task = queueNext()) {
const timeUs_t nextExecuteAt = task->lastExecutedAt + task->desiredPeriod;
const timeUs_t nextExecuteAt = getPeriodCalculationBasis(task) + task->desiredPeriod;
if ((timeDelta_t)(currentTimeUs - nextExecuteAt) >= 0) {
outsideRealtimeGuardInterval = false;
break;
@@ -299,7 +318,7 @@ FAST_CODE void scheduler(void)
} else {
// Task is time-driven, dynamicPriority is last execution age (measured in desiredPeriods)
// Task age is calculated from last execution
task->taskAgeCycles = ((currentTimeUs - task->lastExecutedAt) / task->desiredPeriod);
task->taskAgeCycles = ((currentTimeUs - getPeriodCalculationBasis(task)) / task->desiredPeriod);
if (task->taskAgeCycles > 0) {
task->dynamicPriority = 1 + task->staticPriority * task->taskAgeCycles;
waitingTasks++;
@@ -326,7 +345,9 @@ FAST_CODE void scheduler(void)
if (selectedTask) {
// Found a task that should be run
selectedTask->taskLatestDeltaTime = currentTimeUs - selectedTask->lastExecutedAt;
float period = currentTimeUs - selectedTask->lastExecutedAt;
selectedTask->lastExecutedAt = currentTimeUs;
selectedTask->lastDesiredAt += (cmpTimeUs(currentTimeUs,selectedTask->lastDesiredAt) / selectedTask->desiredPeriod) * selectedTask->desiredPeriod;
selectedTask->dynamicPriority = 0;
// Execute task
@@ -338,6 +359,7 @@ FAST_CODE void scheduler(void)
selectedTask->movingSumExecutionTime += taskExecutionTime - selectedTask->movingSumExecutionTime / MOVING_SUM_COUNT;
selectedTask->totalExecutionTime += taskExecutionTime; // time consumed by scheduler + task
selectedTask->maxExecutionTime = MAX(selectedTask->maxExecutionTime, taskExecutionTime);
selectedTask->movingAverageCycleTime += 0.05f * (period - selectedTask->movingAverageCycleTime);
} else
#endif
{

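Two details in the scheduler hunks above deserve a note. First, getPeriodCalculationBasis uses an offsetof-based lookup so the hot path reads either lastExecutedAt or lastDesiredAt without branching on the policy. Second, when a task runs, lastDesiredAt is advanced by a whole number of desiredPeriod steps, so any fraction of a period that the run was late is carried forward and made up on later iterations, and movingAverageCycleTime is a simple exponential moving average with weight 0.05. A compilable sketch of those two update rules (cmpTimeUs here is a plain signed wrap-around difference standing in for the firmware helper; the function names are illustrative):

#include <stdint.h>

typedef uint32_t timeUs_t;
typedef int32_t timeDelta_t;

// Signed difference that stays correct across timer wrap-around.
static timeDelta_t cmpTimeUs(timeUs_t a, timeUs_t b)
{
    return (timeDelta_t)(a - b);
}

// Advance the desired-execution anchor by however many whole periods have
// elapsed; the remainder is kept, so the average rate converges to
// 1 / desiredPeriod even when individual runs are late.
static void advanceDesiredTime(timeUs_t *lastDesiredAt, timeUs_t now, timeDelta_t desiredPeriod)
{
    *lastDesiredAt += (cmpTimeUs(now, *lastDesiredAt) / desiredPeriod) * desiredPeriod;
}

// Exponential moving average of the observed cycle time, matching the
// 0.05f weight used in the diff above.
static void updateMovingAverageCycleTime(float *movingAverageCycleTime, float periodUs)
{
    *movingAverageCycleTime += 0.05f * (periodUs - *movingAverageCycleTime);
}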
View file

@@ -21,6 +21,7 @@
#pragma once
#include "common/time.h"
#include "fc/config.h"
#define TASK_PERIOD_HZ(hz) (1000000 / (hz))
#define TASK_PERIOD_MS(ms) ((ms) * 1000)
@@ -53,6 +54,7 @@ typedef struct {
timeUs_t maxExecutionTime;
timeUs_t totalExecutionTime;
timeUs_t averageExecutionTime;
float movingAverageCycleTime;
} cfTaskInfo_t;
typedef enum {
@@ -157,9 +159,11 @@ typedef struct {
timeDelta_t taskLatestDeltaTime;
timeUs_t lastExecutedAt; // last time of invocation
timeUs_t lastSignaledAt; // time of invocation event for event-driven tasks
timeUs_t lastDesiredAt; // time of last desired execution
#if defined(USE_TASK_STATISTICS)
// Statistics
float movingAverageCycleTime;
timeUs_t movingSumExecutionTime; // moving sum over 32 samples
timeUs_t maxExecutionTime;
timeUs_t totalExecutionTime; // total time consumed by task since boot
@@ -181,6 +185,7 @@ void schedulerResetTaskMaxExecutionTime(cfTaskId_e taskId);
void schedulerInit(void);
void scheduler(void);
void taskSystemLoad(timeUs_t currentTime);
void schedulerSetPolicy(schedulerPolicy_e policy);
#define LOAD_PERCENTAGE_ONE 100

View file

@@ -389,6 +389,9 @@ TEST(SchedulerUnittest, TestTwoTasks)
// of the two TASK_GYROPID should run first
scheduler();
EXPECT_EQ(&cfTasks[TASK_GYROPID], unittest_scheduler_selectedTask);
// of the two TASK_GYROPID should run again
scheduler();
EXPECT_EQ(&cfTasks[TASK_GYROPID], unittest_scheduler_selectedTask);
// and finally TASK_ACCEL should now run
scheduler();
EXPECT_EQ(&cfTasks[TASK_ACCEL], unittest_scheduler_selectedTask);