Mirror of https://github.com/betaflight/betaflight.git
Merge pull request #11354 from SteveCEvans/sched_defer
Only prioritise a task if there's time to run it
Commit 2c82ddb9a8
3 changed files with 116 additions and 5 deletions
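The idea of the change: before bumping a task to be the selected task, the scheduler now checks whether the task's anticipated execution time (plus a small guard margin) fits into the cycles remaining in the current loop; if it does not, the task is passed over unless the periodic defer escape applies or it is the SERIAL task. Below is a minimal sketch of that selection rule using simplified, hypothetical names and types (candidate_t, mayRunNow, requiredCycles, isSerialTask are illustrative only); the real implementation in the diff hunks that follow works on task_t entries and converts microseconds to CPU cycles with clockMicrosToCycles().

#include <stdint.h>
#include <stdbool.h>

// Illustrative sketch only; not the real scheduler API.
#define SCHED_TASK_DEFER_MASK 0x07  // same value the PR adds to scheduler.h

typedef struct {
    uint16_t dynamicPriority;   // priority accumulated while the task waits
    int32_t  requiredCycles;    // anticipated execution time converted to CPU cycles
    bool     isSerialTask;      // the SERIAL task must never be blocked
} candidate_t;

// True if the candidate may be prioritised on this scheduler loop.
bool mayRunNow(const candidate_t *c, int32_t schedLoopRemainingCycles, uint32_t scheduleCount)
{
    return (c->requiredCycles < schedLoopRemainingCycles)   // the task fits in the time left
        || ((scheduleCount & SCHED_TASK_DEFER_MASK) == 0)   // periodic escape so long tasks still run
        || c->isSerialTask;                                 // never defer the SERIAL task
}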
@@ -431,6 +431,8 @@ static void readSchedulerLocals(task_t *selectedTask, uint8_t selectedTaskDynamicPriority)
 FAST_CODE void scheduler(void)
 {
+    static uint32_t checkCycles = 0;
+    static uint32_t scheduleCount = 0;
 #if !defined(UNIT_TEST)
     const timeUs_t schedulerStartTimeUs = micros();
 #endif
@@ -442,6 +444,13 @@ FAST_CODE void scheduler(void)
     uint32_t nextTargetCycles = 0;
     int32_t schedLoopRemainingCycles;

+#if defined(UNIT_TEST)
+    if (nextTargetCycles == 0) {
+        lastTargetCycles = getCycleCounter();
+        nextTargetCycles = lastTargetCycles + desiredPeriodCycles;
+    }
+#endif
+
     if (gyroEnabled) {
         // Realtime gyro/filtering/PID tasks get complete priority
         task_t *gyroTask = getTask(TASK_GYRO);
@@ -601,13 +610,30 @@ FAST_CODE void scheduler(void)
             }

             if (task->dynamicPriority > selectedTaskDynamicPriority) {
-                selectedTaskDynamicPriority = task->dynamicPriority;
-                selectedTask = task;
+                timeDelta_t taskRequiredTimeUs = task->anticipatedExecutionTime >> TASK_EXEC_TIME_SHIFT;
+                int32_t taskRequiredTimeCycles = (int32_t)clockMicrosToCycles((uint32_t)taskRequiredTimeUs);
+                // Allow a little extra time
+                taskRequiredTimeCycles += checkCycles + taskGuardCycles;
+
+                // If there's no time to run the task, discount it from prioritisation unless aged sufficiently
+                // Don't block the SERIAL task.
+                if ((taskRequiredTimeCycles < schedLoopRemainingCycles) ||
+                    ((scheduleCount & SCHED_TASK_DEFER_MASK) == 0) ||
+                    ((task - tasks) == TASK_SERIAL)) {
+                    selectedTaskDynamicPriority = task->dynamicPriority;
+                    selectedTask = task;
+                }
             }
         }
     }

+    // The number of cycles taken to run the checkers is quite consistent with some higher spikes, but
+    // that doesn't defeat its use
+    checkCycles = cmpTimeCycles(getCycleCounter(), nowCycles);
+
     if (selectedTask) {
+        // Recheck the available time as checkCycles is only approximate
         timeDelta_t taskRequiredTimeUs = selectedTask->anticipatedExecutionTime >> TASK_EXEC_TIME_SHIFT;
 #if defined(USE_LATE_TASK_STATISTICS)
         selectedTask->execTime = taskRequiredTimeUs;
@@ -676,6 +702,8 @@ FAST_CODE void scheduler(void)
 #if defined(UNIT_TEST)
     readSchedulerLocals(selectedTask, selectedTaskDynamicPriority);
 #endif
+
+    scheduleCount++;
 }

 void schedulerEnableGyro(void)
@@ -28,9 +28,11 @@
 #define TASK_PERIOD_MS(ms) ((ms) * 1000)
 #define TASK_PERIOD_US(us) (us)

 #define TASK_STATS_MOVING_SUM_COUNT 64

 #define LOAD_PERCENTAGE_ONE 100

+#define SCHED_TASK_DEFER_MASK 0x07 // Scheduler loop count is masked with this and when 0 long running tasks are processed
+
 #define SCHED_START_LOOP_MIN_US 1 // Wait at start of scheduler loop if gyroTask is nearly due
 #define SCHED_START_LOOP_MAX_US 12
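A note on SCHED_TASK_DEFER_MASK: with a mask of 0x07, the bypass condition (scheduleCount & SCHED_TASK_DEFER_MASK) == 0 holds on every 8th scheduler loop, so a task that keeps failing the remaining-time check is deferred for at most 7 consecutive loops. A small standalone demonstration (hypothetical demo code, not part of the firmware) that prints which loop counts bypass the deferral:

#include <stdio.h>
#include <stdint.h>

#define SCHED_TASK_DEFER_MASK 0x07  // same value as in scheduler.h

int main(void)
{
    // Loops 0, 8, 16, ... let otherwise-deferred (long running) tasks through.
    for (uint32_t scheduleCount = 0; scheduleCount < 20; scheduleCount++) {
        if ((scheduleCount & SCHED_TASK_DEFER_MASK) == 0) {
            printf("loop %2u: deferral bypassed\n", (unsigned)scheduleCount);
        }
    }
    return 0;
}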
@@ -46,7 +46,7 @@ const int TEST_UPDATE_BATTERY_TIME = 1;
 const int TEST_UPDATE_RX_CHECK_TIME = 34;
 const int TEST_UPDATE_RX_MAIN_TIME = 1;
 const int TEST_IMU_UPDATE_TIME = 5;
-const int TEST_DISPATCH_TIME = 1;
+const int TEST_DISPATCH_TIME = 200;
 const int TEST_UPDATE_OSD_CHECK_TIME = 5;
 const int TEST_UPDATE_OSD_TIME = 30;
@@ -457,6 +457,87 @@ TEST(SchedulerUnittest, TestTwoTasks)
     EXPECT_EQ(&tasks[TASK_ATTITUDE], unittest_scheduler_selectedTask);
 }

+TEST(SchedulerUnittest, TestPriorityBump)
+{
+    // disable all tasks except TASK_ACCEL and TASK_DISPATCH
+    for (int taskId = 0; taskId < TASK_COUNT; ++taskId) {
+        setTaskEnabled(static_cast<taskId_e>(taskId), false);
+    }
+    setTaskEnabled(TASK_ACCEL, true);
+    setTaskEnabled(TASK_DISPATCH, true);
+
+    // Both tasks have an update rate of 1kHz, but TASK_DISPATCH has TASK_PRIORITY_HIGH whereas TASK_ACCEL has TASK_PRIORITY_MEDIUM
+    static const uint32_t startTime = 4000;
+    simulatedTime = startTime;
+    tasks[TASK_ACCEL].lastExecutedAtUs = simulatedTime;
+    tasks[TASK_DISPATCH].lastExecutedAtUs = tasks[TASK_ACCEL].lastExecutedAtUs;
+    EXPECT_EQ(0, tasks[TASK_DISPATCH].taskAgePeriods);
+
+    // Set expectation for execution time of TEST_DISPATCH_TIME us
+    tasks[TASK_DISPATCH].anticipatedExecutionTime = TEST_DISPATCH_TIME << TASK_EXEC_TIME_SHIFT;
+
+    // run the scheduler
+    scheduler();
+    // no tasks should have run, since neither task's desired time has elapsed
+    EXPECT_EQ(static_cast<task_t*>(0), unittest_scheduler_selectedTask);
+
+    // NOTE:
+    // TASK_ACCEL desiredPeriodUs is 1000 microseconds
+    // TASK_DISPATCH desiredPeriodUs is 1000 microseconds
+    // 500 microseconds later
+    simulatedTime += 500;
+    // no tasks should run, since neither task's desired time has elapsed
+    scheduler();
+    EXPECT_EQ(static_cast<task_t*>(0), unittest_scheduler_selectedTask);
+
+    // 500 microseconds later, 1000 desiredPeriodUs has elapsed
+    simulatedTime += 500;
+    // TASK_ACCEL should now run as there is not enough time to run the higher priority TASK_DISPATCH
+    scheduler();
+    EXPECT_EQ(&tasks[TASK_ACCEL], unittest_scheduler_selectedTask);
+    EXPECT_EQ(5000 + TEST_UPDATE_ACCEL_TIME, simulatedTime);
+
+    simulatedTime += 1000 - TEST_UPDATE_ACCEL_TIME;
+    // TASK_ACCEL should now run as there is not enough time to run the higher priority TASK_DISPATCH
+    scheduler();
+    EXPECT_EQ(&tasks[TASK_ACCEL], unittest_scheduler_selectedTask);
+    EXPECT_EQ(6000 + TEST_UPDATE_ACCEL_TIME, simulatedTime);
+
+    simulatedTime += 1000 - TEST_UPDATE_ACCEL_TIME;
+    // TASK_ACCEL should now run as there is not enough time to run the higher priority TASK_DISPATCH
+    scheduler();
+    EXPECT_EQ(&tasks[TASK_ACCEL], unittest_scheduler_selectedTask);
+    EXPECT_EQ(7000 + TEST_UPDATE_ACCEL_TIME, simulatedTime);
+
+    simulatedTime += 1000 - TEST_UPDATE_ACCEL_TIME;
+    // TASK_ACCEL should now run as there is not enough time to run the higher priority TASK_DISPATCH
+    scheduler();
+    EXPECT_EQ(&tasks[TASK_ACCEL], unittest_scheduler_selectedTask);
+    EXPECT_EQ(8000 + TEST_UPDATE_ACCEL_TIME, simulatedTime);
+
+    simulatedTime += 1000 - TEST_UPDATE_ACCEL_TIME;
+    // TASK_ACCEL should now run as there is not enough time to run the higher priority TASK_DISPATCH
+    scheduler();
+    EXPECT_EQ(&tasks[TASK_ACCEL], unittest_scheduler_selectedTask);
+    EXPECT_EQ(9000 + TEST_UPDATE_ACCEL_TIME, simulatedTime);
+
+    // TASK_DISPATCH has aged whilst not being run
+    EXPECT_EQ(5, tasks[TASK_DISPATCH].taskAgePeriods);
+    simulatedTime += 1000 - TEST_UPDATE_ACCEL_TIME;
+    // TASK_DISPATCH should now run as the scheduler is on its eighth loop. Note that this is affected by prior test count.
+    scheduler();
+    EXPECT_EQ(&tasks[TASK_DISPATCH], unittest_scheduler_selectedTask);
+    EXPECT_EQ(10000 + TEST_DISPATCH_TIME, simulatedTime);
+    // TASK_DISPATCH still hasn't been executed
+    EXPECT_EQ(6, tasks[TASK_DISPATCH].taskAgePeriods);
+
+    simulatedTime += 1000 - TEST_DISPATCH_TIME;
+    // TASK_ACCEL should now run again as there is not enough time to run the higher priority TASK_DISPATCH
+    scheduler();
+    EXPECT_EQ(&tasks[TASK_ACCEL], unittest_scheduler_selectedTask);
+    EXPECT_EQ(11000 + TEST_UPDATE_ACCEL_TIME, simulatedTime);
+}
+
 TEST(SchedulerUnittest, TestGyroTask)
 {
     static const uint32_t startTime = 4000;
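A note on how the test above drives time: the unit test advances the clock by writing to simulatedTime rather than waiting on real hardware, and the expectations such as EXPECT_EQ(5000 + TEST_UPDATE_ACCEL_TIME, simulatedTime) imply that the stubbed task functions advance simulatedTime by their TEST_*_TIME constants when they run. The harness itself is not part of this diff; the following is an assumed, simplified sketch of the clock stubs only, not the actual test harness code:

#include <stdint.h>

// Assumed sketch: the test clock is a plain variable the test advances by hand.
static uint32_t simulatedTime = 0;

uint32_t micros(void)
{
    return simulatedTime;  // "current time" is whatever the test last set
}

uint32_t getCycleCounter(void)
{
    return simulatedTime;  // assumed: cycles and microseconds tick together in the simulation
}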