refactor: Move to `k_work_delayable` API.

* Move to new `k_work_delayable` APIs introduced in Zephyr 2.6.

See: https://docs.zephyrproject.org/latest/releases/release-notes-2.6.html#api-changes
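The mapping from the legacy API is essentially one-to-one: `struct k_delayed_work` becomes `struct k_work_delayable`, `k_delayed_work_init` becomes `k_work_init_delayable`, `K_DELAYED_WORK_DEFINE` becomes `K_WORK_DELAYABLE_DEFINE`, `k_delayed_work_cancel` becomes `k_work_cancel_delayable`, `k_delayed_work_pending` becomes `k_work_delayable_is_pending`, and the old cancel-then-submit pairs collapse into a single `k_work_reschedule` (plain `k_work_schedule` where nothing pending needs replacing). A minimal sketch of the pattern applied across these files; the `save_work`, `save_handler`, and `SAVE_DEBOUNCE_MS` names are illustrative, not from the tree:

```c
/* Sketch of the k_delayed_work -> k_work_delayable migration pattern used in
 * this commit (Zephyr 2.6 work API). Names below (save_work, save_handler,
 * SAVE_DEBOUNCE_MS) are illustrative only. */
#include <zephyr.h>

#define SAVE_DEBOUNCE_MS 10

/* Old: static struct k_delayed_work save_work; */
static struct k_work_delayable save_work;

static void save_handler(struct k_work *work) {
    /* Old: CONTAINER_OF(work, struct k_delayed_work, work) */
    struct k_work_delayable *dwork = CONTAINER_OF(work, struct k_work_delayable, work);
    ARG_UNUSED(dwork);
    /* ... persist state here ... */
}

static int request_save(void) {
    /* Old: k_delayed_work_cancel(&save_work);
     *      return k_delayed_work_submit(&save_work, K_MSEC(SAVE_DEBOUNCE_MS));
     * New: one call that starts, or restarts, the countdown. */
    return k_work_reschedule(&save_work, K_MSEC(SAVE_DEBOUNCE_MS));
}

static int save_init(const struct device *dev) {
    ARG_UNUSED(dev);
    /* Old: k_delayed_work_init(&save_work, save_handler); */
    k_work_init_delayable(&save_work, save_handler);
    return 0;
}

SYS_INIT(save_init, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
```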
Peter Johanson 2021-11-05 04:13:38 +00:00 committed by Pete Johanson
parent 28ef19488d
commit 53dae35710
11 changed files with 52 additions and 70 deletions

View File

@@ -47,7 +47,7 @@ struct kscan_gpio_item_config {
#define GPIO_INST_INIT(n) \
struct kscan_gpio_irq_callback_##n { \
- struct CHECK_DEBOUNCE_CFG(n, (k_work), (k_delayed_work)) * work; \
+ struct CHECK_DEBOUNCE_CFG(n, (k_work), (k_work_delayable)) * work; \
struct gpio_callback callback; \
const struct device *dev; \
}; \
@@ -60,7 +60,7 @@ struct kscan_gpio_item_config {
struct kscan_gpio_data_##n { \
kscan_callback_t callback; \
struct k_timer poll_timer; \
- struct CHECK_DEBOUNCE_CFG(n, (k_work), (k_delayed_work)) work; \
+ struct CHECK_DEBOUNCE_CFG(n, (k_work), (k_work_delayable)) work; \
bool matrix_state[INST_MATRIX_INPUTS(n)][INST_MATRIX_OUTPUTS(n)]; \
const struct device *rows[INST_MATRIX_INPUTS(n)]; \
const struct device *cols[INST_MATRIX_OUTPUTS(n)]; \
@@ -137,10 +137,8 @@ struct kscan_gpio_item_config {
} \
} \
if (submit_follow_up_read) { \
- CHECK_DEBOUNCE_CFG(n, ({ k_work_submit(&data->work); }), ({ \
- k_delayed_work_cancel(&data->work); \
- k_delayed_work_submit(&data->work, K_MSEC(5)); \
- })) \
+ CHECK_DEBOUNCE_CFG(n, ({ k_work_submit(&data->work); }), \
+ ({ k_work_reschedule(&data->work, K_MSEC(5)); })) \
} \
return 0; \
} \
@@ -232,7 +230,7 @@ struct kscan_gpio_item_config {
\
k_timer_init(&data->poll_timer, kscan_gpio_timer_handler, NULL); \
\
- (CHECK_DEBOUNCE_CFG(n, (k_work_init), (k_delayed_work_init)))( \
+ (CHECK_DEBOUNCE_CFG(n, (k_work_init), (k_work_init_delayable)))( \
&data->work, kscan_gpio_work_handler_##n); \
return 0; \
} \

View File

@@ -22,7 +22,7 @@ struct kscan_gpio_item_config {
};
union work_reference {
- struct k_delayed_work delayed;
+ struct k_work_delayable delayed;
struct k_work direct;
};
@@ -55,8 +55,7 @@ static const struct kscan_gpio_item_config *kscan_gpio_input_configs(const struc
static void kscan_gpio_direct_queue_read(union work_reference *work, uint8_t debounce_period) {
if (debounce_period > 0) {
- k_delayed_work_cancel(&work->delayed);
- k_delayed_work_submit(&work->delayed, K_MSEC(debounce_period));
+ k_work_reschedule(&work->delayed, K_MSEC(debounce_period));
} else {
k_work_submit(&work->direct);
}
@@ -228,7 +227,7 @@ static const struct kscan_driver_api gpio_driver_api = {
COND_CODE_1(IS_ENABLED(CONFIG_ZMK_KSCAN_DIRECT_POLLING), \
(k_timer_init(&data->poll_timer, kscan_gpio_timer_handler, NULL);), ()) \
if (cfg->debounce_period > 0) { \
- k_delayed_work_init(&data->work.delayed, kscan_gpio_work_handler); \
+ k_work_init_delayable(&data->work.delayed, kscan_gpio_work_handler); \
} else { \
k_work_init(&data->work.direct, kscan_gpio_work_handler); \
} \

View File

@@ -88,7 +88,7 @@ struct kscan_matrix_irq_callback {
struct kscan_matrix_data {
const struct device *dev;
kscan_callback_t callback;
- struct k_delayed_work work;
+ struct k_work_delayable work;
#if USE_INTERRUPTS
/** Array of length config->inputs.len */
struct kscan_matrix_irq_callback *irqs;
@@ -214,9 +214,7 @@ static void kscan_matrix_irq_callback_handler(const struct device *port, struct
data->scan_time = k_uptime_get();
- // TODO (Zephyr 2.6): use k_work_reschedule()
- k_delayed_work_cancel(&data->work);
- k_delayed_work_submit(&data->work, K_NO_WAIT);
+ k_work_reschedule(&data->work, K_NO_WAIT);
}
#endif
@@ -226,9 +224,7 @@ static void kscan_matrix_read_continue(const struct device *dev) {
data->scan_time += config->debounce_scan_period_ms;
- // TODO (Zephyr 2.6): use k_work_reschedule()
- k_delayed_work_cancel(&data->work);
- k_delayed_work_submit(&data->work, K_TIMEOUT_ABS_MS(data->scan_time));
+ k_work_reschedule(&data->work, K_TIMEOUT_ABS_MS(data->scan_time));
}
static void kscan_matrix_read_end(const struct device *dev) {
@@ -242,9 +238,7 @@ static void kscan_matrix_read_end(const struct device *dev) {
data->scan_time += config->poll_period_ms;
// Return to polling slowly.
- // TODO (Zephyr 2.6): use k_work_reschedule()
- k_delayed_work_cancel(&data->work);
- k_delayed_work_submit(&data->work, K_TIMEOUT_ABS_MS(data->scan_time));
+ k_work_reschedule(&data->work, K_TIMEOUT_ABS_MS(data->scan_time));
#endif
}
@@ -311,7 +305,7 @@ static int kscan_matrix_read(const struct device *dev) {
}
static void kscan_matrix_work_handler(struct k_work *work) {
- struct k_delayed_work *dwork = CONTAINER_OF(work, struct k_delayed_work, work);
+ struct k_work_delayable *dwork = CONTAINER_OF(work, struct k_work_delayable, work);
struct kscan_matrix_data *data = CONTAINER_OF(dwork, struct kscan_matrix_data, work);
kscan_matrix_read(data->dev);
}
@@ -339,7 +333,7 @@ static int kscan_matrix_enable(const struct device *dev) {
static int kscan_matrix_disable(const struct device *dev) {
struct kscan_matrix_data *data = dev->data;
- k_delayed_work_cancel(&data->work);
+ k_work_cancel_delayable(&data->work);
#if USE_INTERRUPTS
return kscan_matrix_interrupt_disable(dev);
@@ -434,7 +428,7 @@ static int kscan_matrix_init(const struct device *dev) {
kscan_matrix_init_outputs(dev);
kscan_matrix_set_all_outputs(dev, 0);
- k_delayed_work_init(&data->work, kscan_matrix_work_handler);
+ k_work_init_delayable(&data->work, kscan_matrix_work_handler);
return 0;
}

View File

@@ -19,14 +19,14 @@ struct kscan_mock_data {
kscan_callback_t callback;
uint32_t event_index;
- struct k_delayed_work work;
+ struct k_work_delayable work;
const struct device *dev;
};
static int kscan_mock_disable_callback(const struct device *dev) {
struct kscan_mock_data *data = dev->data;
- k_delayed_work_cancel(&data->work);
+ k_work_cancel_delayable(&data->work);
return 0;
}
@@ -54,7 +54,7 @@ static int kscan_mock_configure(const struct device *dev, kscan_callback_t callb
if (data->event_index < DT_INST_PROP_LEN(n, events)) { \
uint32_t ev = cfg->events[data->event_index]; \
LOG_DBG("delaying next keypress: %d", ZMK_MOCK_MSEC(ev)); \
- k_delayed_work_submit(&data->work, K_MSEC(ZMK_MOCK_MSEC(ev))); \
+ k_work_schedule(&data->work, K_MSEC(ZMK_MOCK_MSEC(ev))); \
} else if (cfg->exit_after) { \
LOG_DBG("Exiting"); \
exit(0); \
@@ -73,7 +73,7 @@ static int kscan_mock_configure(const struct device *dev, kscan_callback_t callb
static int kscan_mock_init_##n(const struct device *dev) { \
struct kscan_mock_data *data = dev->data; \
data->dev = dev; \
- k_delayed_work_init(&data->work, kscan_mock_work_handler_##n); \
+ k_work_init_delayable(&data->work, kscan_mock_work_handler_##n); \
return 0; \
} \
static int kscan_mock_enable_callback_##n(const struct device *dev) { \
@@ -88,7 +88,7 @@ static int kscan_mock_configure(const struct device *dev, kscan_callback_t callb
static struct kscan_mock_data kscan_mock_data_##n; \
static const struct kscan_mock_config_##n kscan_mock_config_##n = { \
.events = DT_INST_PROP(n, events), .exit_after = DT_INST_PROP(n, exit_after)}; \
- DEVICE_DT_INST_DEFINE(n, kscan_mock_init_##n, device_pm_control_nop, &kscan_mock_data_##n, \
+ DEVICE_DT_INST_DEFINE(n, kscan_mock_init_##n, NULL, &kscan_mock_data_##n, \
&kscan_mock_config_##n, APPLICATION, \
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &mock_driver_api_##n);

View File

@@ -22,7 +22,7 @@ struct q_item {
K_MSGQ_DEFINE(zmk_behavior_queue_msgq, sizeof(struct q_item), CONFIG_ZMK_BEHAVIORS_QUEUE_SIZE, 4);
static void behavior_queue_process_next(struct k_work *work);
- static K_DELAYED_WORK_DEFINE(queue_work, behavior_queue_process_next);
+ static K_WORK_DELAYABLE_DEFINE(queue_work, behavior_queue_process_next);
static void behavior_queue_process_next(struct k_work *work) {
struct q_item item = {.wait = 0};
@@ -43,7 +43,7 @@ static void behavior_queue_process_next(struct k_work *work) {
LOG_DBG("Processing next queued behavior in %dms", item.wait);
if (item.wait > 0) {
- k_delayed_work_submit(&queue_work, K_MSEC(item.wait));
+ k_work_schedule(&queue_work, K_MSEC(item.wait));
break;
}
}
@@ -58,7 +58,7 @@ int zmk_behavior_queue_add(uint32_t position, const struct zmk_behavior_binding
return ret;
}
- if (!k_delayed_work_pending(&queue_work)) {
+ if (!k_work_delayable_is_pending(&queue_work)) {
behavior_queue_process_next(&queue_work.work);
}

View File

@@ -23,7 +23,7 @@ LOG_MODULE_DECLARE(zmk, CONFIG_ZMK_LOG_LEVEL);
#define ZMK_BHV_TAP_DANCE_MAX_HELD 10
- #define ZMK_BHV_TAP_DANCE_POSITION_FREE ULONG_MAX
+ #define ZMK_BHV_TAP_DANCE_POSITION_FREE UINT32_MAX
struct behavior_tap_dance_config {
uint32_t tapping_term_ms;
@@ -45,7 +45,7 @@ struct active_tap_dance {
bool timer_cancelled;
bool tap_dance_decided;
int64_t release_at;
- struct k_delayed_work release_timer;
+ struct k_work_delayable release_timer;
};
struct active_tap_dance active_tap_dances[ZMK_BHV_TAP_DANCE_MAX_HELD] = {};
@@ -84,7 +84,7 @@ static void clear_tap_dance(struct active_tap_dance *tap_dance) {
}
static int stop_timer(struct active_tap_dance *tap_dance) {
- int timer_cancel_result = k_delayed_work_cancel(&tap_dance->release_timer);
+ int timer_cancel_result = k_work_cancel_delayable(&tap_dance->release_timer);
if (timer_cancel_result == -EINPROGRESS) {
// too late to cancel, we'll let the timer handler clear up.
tap_dance->timer_cancelled = true;
@@ -97,7 +97,7 @@ static void reset_timer(struct active_tap_dance *tap_dance,
tap_dance->release_at = event.timestamp + tap_dance->config->tapping_term_ms;
int32_t ms_left = tap_dance->release_at - k_uptime_get();
if (ms_left > 0) {
- k_delayed_work_submit(&tap_dance->release_timer, K_MSEC(ms_left));
+ k_work_schedule(&tap_dance->release_timer, K_MSEC(ms_left));
LOG_DBG("Successfully reset timer at position %d", tap_dance->position);
}
}
@@ -228,8 +228,8 @@ static int behavior_tap_dance_init(const struct device *dev) {
static bool init_first_run = true;
if (init_first_run) {
for (int i = 0; i < ZMK_BHV_TAP_DANCE_MAX_HELD; i++) {
- k_delayed_work_init(&active_tap_dances[i].release_timer,
- behavior_tap_dance_timer_handler);
+ k_work_init_delayable(&active_tap_dances[i].release_timer,
+ behavior_tap_dance_timer_handler);
clear_tap_dance(&active_tap_dances[i]);
}
}
@@ -250,9 +250,9 @@ static int behavior_tap_dance_init(const struct device *dev) {
.tapping_term_ms = DT_INST_PROP(n, tapping_term_ms), \
.behaviors = behavior_tap_dance_config_##n##_bindings, \
.behavior_count = DT_INST_PROP_LEN(n, bindings)}; \
- DEVICE_AND_API_INIT(behavior_tap_dance_##n, DT_INST_LABEL(n), behavior_tap_dance_init, NULL, \
- &behavior_tap_dance_config_##n, APPLICATION, \
- CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &behavior_tap_dance_driver_api);
+ DEVICE_DT_INST_DEFINE(n, behavior_tap_dance_init, device_pm_control_nop, NULL, \
+ &behavior_tap_dance_config_##n, APPLICATION, \
+ CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &behavior_tap_dance_driver_api);
DT_INST_FOREACH_STATUS_OKAY(KP_INST)

View File

@@ -247,13 +247,12 @@ static void ble_save_profile_work(struct k_work *work) {
settings_save_one("ble/active_profile", &active_profile, sizeof(active_profile));
}
- static struct k_delayed_work ble_save_work;
+ static struct k_work_delayable ble_save_work;
#endif
static int ble_save_profile() {
#if IS_ENABLED(CONFIG_SETTINGS)
- k_delayed_work_cancel(&ble_save_work);
- return k_delayed_work_submit(&ble_save_work, K_MSEC(CONFIG_ZMK_SETTINGS_SAVE_DEBOUNCE));
+ return k_work_reschedule(&ble_save_work, K_MSEC(CONFIG_ZMK_SETTINGS_SAVE_DEBOUNCE));
#else
return 0;
#endif
@@ -391,11 +390,6 @@ static void connected(struct bt_conn *conn, uint8_t err) {
LOG_DBG("Connected %s", log_strdup(addr));
- err = bt_conn_le_param_update(conn, BT_LE_CONN_PARAM(0x0006, 0x000c, 30, 400));
- if (err) {
- LOG_WRN("Failed to update LE parameters (err %d)", err);
- }
#if IS_SPLIT_PERIPHERAL
bt_conn_le_phy_update(conn, BT_CONN_LE_PHY_PARAM_2M);
#endif
@@ -505,7 +499,7 @@ static enum bt_security_err auth_pairing_accept(struct bt_conn *conn,
bt_conn_get_info(conn, &info);
LOG_DBG("role %d, open? %s", info.role, zmk_ble_active_profile_is_open() ? "yes" : "no");
- if (info.role == BT_CONN_ROLE_SLAVE && !zmk_ble_active_profile_is_open()) {
+ if (info.role == BT_CONN_ROLE_PERIPHERAL && !zmk_ble_active_profile_is_open()) {
LOG_WRN("Rejecting pairing request to taken profile %d", active_profile);
return BT_SECURITY_ERR_PAIR_NOT_ALLOWED;
}
@@ -522,7 +516,7 @@ static void auth_pairing_complete(struct bt_conn *conn, bool bonded) {
bt_addr_le_to_str(dst, addr, sizeof(addr));
bt_conn_get_info(conn, &info);
- if (info.role != BT_CONN_ROLE_SLAVE) {
+ if (info.role != BT_CONN_ROLE_PERIPHERAL) {
LOG_DBG("SKIPPING FOR ROLE %d", info.role);
return;
}
@@ -579,7 +573,7 @@ static int zmk_ble_init(const struct device *_arg) {
return err;
}
- k_delayed_work_init(&ble_save_work, ble_save_profile_work);
+ k_work_init_delayable(&ble_save_work, ble_save_profile_work);
settings_load_subtree("ble");
settings_load_subtree("bt");

View File

@@ -67,7 +67,7 @@ struct combo_cfg *combo_lookup[ZMK_KEYMAP_LEN][CONFIG_ZMK_COMBO_MAX_COMBOS_PER_K
struct active_combo active_combos[CONFIG_ZMK_COMBO_MAX_PRESSED_COMBOS] = {NULL};
int active_combo_count = 0;
- struct k_delayed_work timeout_task;
+ struct k_work_delayable timeout_task;
int64_t timeout_task_timeout_at;
// Store the combo key pointer in the combos array, one pointer for each key position
@@ -370,7 +370,7 @@ static bool release_combo_key(int32_t position, int64_t timestamp) {
}
static int cleanup() {
- k_delayed_work_cancel(&timeout_task);
+ k_work_cancel_delayable(&timeout_task);
clear_candidates();
if (fully_pressed_combo != NULL) {
activate_combo(fully_pressed_combo);
@@ -386,10 +386,10 @@ static void update_timeout_task() {
}
if (first_timeout == LLONG_MAX) {
timeout_task_timeout_at = 0;
- k_delayed_work_cancel(&timeout_task);
+ k_work_cancel_delayable(&timeout_task);
return;
}
- if (k_delayed_work_submit(&timeout_task, K_MSEC(first_timeout - k_uptime_get())) == 0) {
+ if (k_work_schedule(&timeout_task, K_MSEC(first_timeout - k_uptime_get())) == 0) {
timeout_task_timeout_at = first_timeout;
}
}
@@ -486,7 +486,7 @@ ZMK_SUBSCRIPTION(combo, zmk_position_state_changed);
DT_INST_FOREACH_CHILD(0, COMBO_INST)
static int combo_init() {
- k_delayed_work_init(&timeout_task, combo_timeout_handler);
+ k_work_init_delayable(&timeout_task, combo_timeout_handler);
DT_INST_FOREACH_CHILD(0, INITIALIZE_COMBO);
return 0;
}

View File

@@ -35,13 +35,12 @@ static void endpoints_save_preferred_work(struct k_work *work) {
settings_save_one("endpoints/preferred", &preferred_endpoint, sizeof(preferred_endpoint));
}
- static struct k_delayed_work endpoints_save_work;
+ static struct k_work_delayable endpoints_save_work;
#endif
static int endpoints_save_preferred() {
#if IS_ENABLED(CONFIG_SETTINGS)
- k_delayed_work_cancel(&endpoints_save_work);
- return k_delayed_work_submit(&endpoints_save_work, K_MSEC(CONFIG_ZMK_SETTINGS_SAVE_DEBOUNCE));
+ return k_work_reschedule(&endpoints_save_work, K_MSEC(CONFIG_ZMK_SETTINGS_SAVE_DEBOUNCE));
#else
return 0;
#endif
@@ -182,7 +181,7 @@ static int zmk_endpoints_init(const struct device *_arg) {
return err;
}
- k_delayed_work_init(&endpoints_save_work, endpoints_save_preferred_work);
+ k_work_init_delayable(&endpoints_save_work, endpoints_save_preferred_work);
settings_load_subtree("endpoints");
#endif

View File

@@ -47,13 +47,12 @@ static void ext_power_save_state_work(struct k_work *work) {
settings_save_one(setting_path, &data->status, sizeof(data->status));
}
- static struct k_delayed_work ext_power_save_work;
+ static struct k_work_delayable ext_power_save_work;
#endif
int ext_power_save_state() {
#if IS_ENABLED(CONFIG_SETTINGS)
- k_delayed_work_cancel(&ext_power_save_work);
- return k_delayed_work_submit(&ext_power_save_work, K_MSEC(CONFIG_ZMK_SETTINGS_SAVE_DEBOUNCE));
+ return k_work_reschedule(&ext_power_save_work, K_MSEC(CONFIG_ZMK_SETTINGS_SAVE_DEBOUNCE));
#else
return 0;
#endif
@@ -156,14 +155,14 @@ static int ext_power_generic_init(const struct device *dev) {
return err;
}
- k_delayed_work_init(&ext_power_save_work, ext_power_save_state_work);
+ k_work_init_delayable(&ext_power_save_work, ext_power_save_state_work);
// Set default value (on) if settings isn't set
settings_load_subtree("ext_power");
if (!data->settings_init) {
data->status = true;
- k_delayed_work_submit(&ext_power_save_work, K_NO_WAIT);
+ k_work_schedule(&ext_power_save_work, K_NO_WAIT);
ext_power_enable(dev);
}

View File

@@ -220,7 +220,7 @@ static void zmk_rgb_underglow_save_state_work() {
settings_save_one("rgb/underglow/state", &state, sizeof(state));
}
- static struct k_delayed_work underglow_save_work;
+ static struct k_work_delayable underglow_save_work;
#endif
static int zmk_rgb_underglow_init(const struct device *_arg) {
@@ -260,7 +260,7 @@ static int zmk_rgb_underglow_init(const struct device *_arg) {
return err;
}
- k_delayed_work_init(&underglow_save_work, zmk_rgb_underglow_save_state_work);
+ k_work_init_delayable(&underglow_save_work, zmk_rgb_underglow_save_state_work);
settings_load_subtree("rgb/underglow");
#endif
@@ -272,8 +272,7 @@ static int zmk_rgb_underglow_init(const struct device *_arg) {
int zmk_rgb_underglow_save_state() {
#if IS_ENABLED(CONFIG_SETTINGS)
- k_delayed_work_cancel(&underglow_save_work);
- return k_delayed_work_submit(&underglow_save_work, K_MSEC(CONFIG_ZMK_SETTINGS_SAVE_DEBOUNCE));
+ return k_work_reschedule(&underglow_save_work, K_MSEC(CONFIG_ZMK_SETTINGS_SAVE_DEBOUNCE));
#else
return 0;
#endif