diff --git a/nvsched/Makefile.sources b/nvsched/Makefile.sources new file mode 100644 index 000000000..0e73b67d1 --- /dev/null +++ b/nvsched/Makefile.sources @@ -0,0 +1,25 @@ +# tell Emacs this is a -*- makefile-gmake -*- +# +# Copyright (c) 2021 NVIDIA CORPORATION. All Rights Reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +# Build sources and include for the nv_sched core implementation. This +# file defines a list of source files and header includes that can be +# added to another project. nv_sched alone is not going to do anything: +# it has to be tied to some HW. +# +# The intent is to build this same code into different places. One +# obvious example is a kernel mode driver for the GPU. But this should +# also be buildable, long term, as firmware running on a ucontroller in +# the GPU. + +NVS_SOURCES += src/sched.c \ + src/logging.c \ + src/domain.c + +NVS_INCLUDE += include diff --git a/nvsched/include/nvs/context.h b/nvsched/include/nvs/context.h new file mode 100644 index 000000000..e86decc77 --- /dev/null +++ b/nvsched/include/nvs/context.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2021 NVIDIA Corporation. All rights reserved. + * + * NVIDIA Corporation and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA Corporation is strictly prohibited. 
+ */
+
+#ifndef NVS_CONTEXT_H
+#define NVS_CONTEXT_H
+
+#include <nvs/types-internal.h>
+
+struct nvs_context;
+
+/**
+ * Similar to an nvs_domain_list this is a singly linked list of contexts.
+ * If sub-scheduler algorithms ever want something more sophisticated they'll
+ * likely have to build it themselves.
+ */
+struct nvs_context_list {
+	u32 nr;
+	struct nvs_context *contexts;
+};
+
+#endif
diff --git a/nvsched/include/nvs/domain.h b/nvsched/include/nvs/domain.h
new file mode 100644
index 000000000..2814e21a0
--- /dev/null
+++ b/nvsched/include/nvs/domain.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NVIDIA Corporation and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA Corporation is strictly prohibited.
+ */
+
+#ifndef NVS_DOMAIN_H
+#define NVS_DOMAIN_H
+
+#include <nvs/types-internal.h>
+
+struct nvs_sched;
+struct nvs_domain;
+
+/*
+ * nvsched provides a simple, singly linked list for keeping track of
+ * available domains. If algorithms need something more complex, like a
+ * table of priorities and domains therein, then it will need to build
+ * these data structures during its init().
+ */
+struct nvs_domain_list {
+	u32 nr;
+	struct nvs_domain *domains;
+
+	/*
+	 * Convenience for adding a domain quickly.
+	 */
+	struct nvs_domain *last;
+};
+
+struct nvs_domain {
+	char name[32];
+
+	struct nvs_context_list *ctx_list;
+
+	/*
+	 * Internal, singly linked list pointer.
+	 */
+	struct nvs_domain *next;
+
+	/*
+	 * Scheduling parameters: specify how long this domain should be scheduled
+	 * for and what the grace period the scheduler should give this domain when
+	 * preempting. A value of zero is treated as an infinite timeslice or an
+	 * infinite grace period.
+ */ + u32 timeslice_us; + u32 preempt_grace_us; + + /* + * Priv pointer for downstream use. + */ + void *priv; +}; + +/** + * @brief Iterate over the list of domains present in the sched. + */ +#define nvs_domain_for_each(sched, domain_ptr) \ + for ((domain_ptr) = (sched)->domain_list->domains; \ + (domain_ptr) != NULL; \ + (domain_ptr) = (domain_ptr)->next) + +struct nvs_domain *nvs_domain_create(struct nvs_sched *sched, + const char *name, u32 timeslice, u32 preempt_grace, + void *priv); +void nvs_domain_destroy(struct nvs_sched *sched, struct nvs_domain *dom); +void nvs_domain_clear_all(struct nvs_sched *sched); +u32 nvs_domain_count(struct nvs_sched *sched); +struct nvs_domain *nvs_domain_by_name(struct nvs_sched *sched, const char *name); + +#endif diff --git a/nvsched/include/nvs/impl-internal.h b/nvsched/include/nvs/impl-internal.h new file mode 100644 index 000000000..d147b37d2 --- /dev/null +++ b/nvsched/include/nvs/impl-internal.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2021 NVIDIA Corporation. All rights reserved. + * + * NVIDIA Corporation and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA Corporation is strictly prohibited. + */ + +#ifndef NVS_IMPL_INTERNAL_H +#define NVS_IMPL_INTERNAL_H + +/* + * Each implementation of the nvsched code needs to provide a few basic + * functions to use for interaction with the environment. Things such as + * memory allocation and de-allocation. + * + * These should be provided to nvsched as macro definitions as laid out + * below. Each implementation should provide an impl.h header file in an + * accessible header include path that defines these macros. 
+ */
+
+#include "impl.h"
+
+#ifndef nvs_malloc
+#error "Missing impl def: nvs_malloc()"
+#else
+/**
+ * @brief Allocate and return a pointer to memory.
+ *
+ * @param size Size of the memory in bytes.
+ *
+ * @return A void pointer to memory containing \a size
+ * bytes of available memory.
+ *
+ * Implementation notes: This may allocate more memory than is strictly
+ * needed. The \a size argument is an unsigned 64 bit type, u64.
+ *
+ * #define nvs_malloc(sched, size)
+ */
+#endif
+
+#ifndef nvs_free
+#error "Missing impl def: nvs_free()"
+#else
+/**
+ * @brief Free a ptr created with nvs_malloc().
+ *
+ * #define nvs_free(sched, ptr)
+ */
+#endif
+
+#ifndef nvs_memset
+#error "Missing impl def: nvs_memset()"
+#else
+/**
+ * @brief Set contents of \a ptr to \a value.
+ *
+ * #define nvs_memset(ptr, value, size)
+ */
+#endif
+
+#ifndef nvs_timestamp
+#error "Missing impl def: nvs_timestamp()"
+#else
+/**
+ * @brief Return the current time in _nanoseconds_. Expected return is a s64; this
+ * makes it easier on Linux.
+ *
+ * #define nvs_timestamp()
+ */
+#endif
+
+#ifndef nvs_log
+#error "Missing impl def: nvs_log()"
+#else
+/**
+ * @brief Print a log message; log messages are by definition informational. They
+ * are likely going to be printed to a uart or something similar so will be very
+ * slow.
+ *
+ * It is up to the integrator to turn them on and off as needed.
+ *
+ * #define nvs_log(sched, fmt, ...)
+ */
+#endif
+
+#endif
diff --git a/nvsched/include/nvs/log.h b/nvsched/include/nvs/log.h
new file mode 100644
index 000000000..ab9321c63
--- /dev/null
+++ b/nvsched/include/nvs/log.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NVIDIA Corporation and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto.
Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA Corporation is strictly prohibited. + */ + +#ifndef NVS_LOG_H +#define NVS_LOG_H + +#include + +/* + * Default log size; 64K entries at 8 bytes each is 512Kb of space. For a space + * constrained system this is obviously a lot. It can be overridden. + */ +#ifndef NVS_LOG_ENTRIES +#define NVS_LOG_ENTRIES (64 * 1024) +#endif + +/* + * Fast and efficient logging, even on microcontrollers, is an absolute + * must for nvsched. The logging provided here is binary encoded to take up + * a small amount of space and reduce time spent writing the logs. + * + * An implementation of nvsched should decode the logs later, when not in + * a time critical path. The event type can be decoded with nvs_log_event_string(). + */ +enum nvs_event { + NVS_EV_NO_EVENT, + NVS_EV_CREATE_SCHED, + NVS_EV_CREATE_DOMAIN, + NVS_EV_REMOVE_DOMAIN, + NVS_EV_MAX = 0xffffffff /* Force to 32 bit enum size. */ +}; + +struct nvs_sched; + +/** + * @brief A single log event used to track event type, timestamp, etc. Note this + * is 8 byte aligned. + */ +struct nvs_log_event { + u64 timestamp; + u32 data; + enum nvs_event event; +}; + +/** + * Simple circular buffer for putting and getting events. + */ +struct nvs_log_buffer { + struct nvs_log_event *events; + u32 entries; + + u32 get; + u32 put; + + u64 ts_offset; +}; + +int nvs_log_init(struct nvs_sched *sched); +void nvs_log_destroy(struct nvs_sched *sched); +void nvs_log_event(struct nvs_sched *sched, enum nvs_event event, u32 data); +void nvs_log_get(struct nvs_sched *sched, struct nvs_log_event *ev); +const char *nvs_log_event_string(enum nvs_event ev); + +#endif diff --git a/nvsched/include/nvs/sched.h b/nvsched/include/nvs/sched.h new file mode 100644 index 000000000..b80764345 --- /dev/null +++ b/nvsched/include/nvs/sched.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2021 NVIDIA Corporation. 
All rights reserved. + * + * NVIDIA Corporation and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA Corporation is strictly prohibited. + */ + +#ifndef NVS_SCHED_H +#define NVS_SCHED_H + +/** + * @page NV Scheduler + * + * Overview + * ======== + * + * An nvs_sched object defines a _scheduler_, this is an object that contains + * information about the domains and contexts to manage, and some operations + * to interact with the underlying HW. The scheduler is split up into three + * distinct parts: + * + * 1. The implementation operations that allow the scheduler to interact + * with a given piece of hardware. This serves as a hardware abstraction + * since the conceptual framework here is not tied to a specific piece of + * HW such as a GPU. + * 2. Algorithm implementations that pick the next context to actually run. + * 3. A core component that defines the data structures which define the + * domains/contexts. The core component is responsible for linking the + * scheduling algorithm outputs to hardware operations. + * + * Implementation Operations + * ========================= + * + * Each concrete implementation of nvsched must provide, at a minimum some + * operations that allow the scheduling logic to interact with the managed + * HW. The two primary operations are preemption and recovery. + * + * Algorithms + * ========== + * + * nvsched splits the data structures from the algorithms. This allows + * multiple algorithms to be supported: for example one implementation could + * use a round-robin approach for picking next domains, but another may wish + * to use a priority based approach. 
+ * + * Core Scheduler + * ============== + * + * The responsibility for the core scheduler is to provide data structures + * that model a two level scheduling model: first there's domains and then + * there's contexts within a domain. An implementation built on top of + * nvsched will need to instantiate domains and contexts and then execute + * some top level operations to trigger scheduling work. + * + * The data structure nesting looks like this: + * + * struct nvs_sched + * +-------------------------+ +---------->+-----------+ + * | | | | preempt() | + * | struct nvs_sched_ops +-----+ | recover() | + * | | +-----------+ + * | // List of: | + * | struct nvs_domain +---------------->+-----------------+ + * | | | Domain 1 | + * | struct nvs_domain_algo +-------+ | Domain Params | + * | | | | Context list +-----+ + * +-------------------------+ | +-----------------+ | + * | | Domain ... | | + * +-------------+ | | Domain Params | | + * | Context 1 |<---------+ | | Context list +---+ | + * +-------------+ | | +-----------------+ | | + * | Context 2 |<---------+ | | Domain N | | | + * +-------------+ | | | Domain Params | | | + * | Context ... |<-----+ | | | Context list +-+ | | + * +-------------+ | | | +-----------------+ | | | + * | Context ... |<-----+ | | | | | + * +-------------+ | | +-------->+-----------------+ | | | + * | Context M |<-+ | | | next_domain() | | | | + * +-------------+ | | | | schedule() | | | | + * | | | | init() | | | | + * | | | +-----------------+ | | | + * +---|---|-----------------------------------+ | | + * +---|-------------------------------------+ | + * +---------------------------------------+ + */ + +#include +#include + +struct nvs_sched; +struct nvs_domain; +struct nvs_domain_algo; +struct nvs_domain_list; +struct nvs_log_buffer; + +/** + * @brief Base scheduling operations an implementation will need to provide + * to the scheduling core. 
+ */ +struct nvs_sched_ops { + /** + * @brief Preempt the running context on the device \a sched + * is managing. + * + * @param sched The scheduler. + */ + int (*preempt)(struct nvs_sched *sched); + + /** + * @brief Recover the running context in \a sched. + */ + int (*recover)(struct nvs_sched *sched); +}; + +/** + * @brief Define a top level scheduler object. + */ +struct nvs_sched { + /** + * Ops that let the scheduler interface with the underlying + * hardware. + */ + struct nvs_sched_ops *ops; + + /** + * List of domains. Internally stored as a singly linked + * list. + * + * @sa struct nvs_domain_list + */ + struct nvs_domain_list *domain_list; + + /** + * Algorithm instance; invoked after a schedule() call. + */ + struct nvs_domain_algo *algorithm; + + /** + * Log buffer with log entries. + */ + struct nvs_log_buffer *log; + + /** + * Implementation private data. + */ + void *priv; +}; + +/** + * @brief Create a scheduler and assign the \a ops and \a priv pointers. + * + * @param sched Pointer to an uninitialized struct sched. + * @param ops Ops defining HW interactions. + * @param priv Private pointer for implementation use. + * + * Build a sched struct in the passed memory \a sched. This pointer should + * have at least sizeof(struct nvs_sched) bytes. nvsched cannot do this allocation + * since the nvs_malloc() function relies on the sched object (some APIs require + * an API token of some sort which they may choose to embed in sched->priv. + * + * The ops struct should be in memory that will not be reclaimed until after the + * \a sched memory is reclaimed. + * + * \a priv may be used by implementations where needed. The priv pointer contents + * will never be touched by nvsched. 
+ */ +int nvs_sched_create(struct nvs_sched *sched, + struct nvs_sched_ops *ops, void *priv); + +void nvs_sched_close(struct nvs_sched *sched); + +#endif diff --git a/nvsched/include/nvs/types-internal.h b/nvsched/include/nvs/types-internal.h new file mode 100644 index 000000000..d71ccdc87 --- /dev/null +++ b/nvsched/include/nvs/types-internal.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021 NVIDIA Corporation. All rights reserved. + * + * NVIDIA Corporation and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA Corporation is strictly prohibited. + */ + +#ifndef NVS_TYPES_INTERNAL_H +#define NVS_TYPES_INTERNAL_H + +/* + * If an implementation decides to it can provide a types.h header that + * nvsched will attempt to include. If so then the below define should + * be set. + * + * If no types.h is passed then stdint.h is used to build the expected + * types in nvsched. + */ +#ifdef NVS_USE_IMPL_TYPES +#include "types.h" +#else +#include + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; +#endif + +#endif diff --git a/nvsched/src/domain.c b/nvsched/src/domain.c new file mode 100644 index 000000000..b95468689 --- /dev/null +++ b/nvsched/src/domain.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2021 NVIDIA Corporation. All rights reserved. + * + * NVIDIA Corporation and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. 
Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA Corporation is strictly prohibited. + */ + +#include +#include +#include + +/* + * Create and add a new domain to the end of the domain list. + */ +struct nvs_domain *nvs_domain_create(struct nvs_sched *sched, + const char *name, u32 timeslice, u32 preempt_grace, + void *priv) +{ + struct nvs_domain_list *dlist = sched->domain_list; + struct nvs_domain *dom = nvs_malloc(sched, sizeof(*dom)); + + nvs_log(sched, "Creating domain - %s", name); + + if (dom == NULL) { + return NULL; + } + + nvs_memset(dom, 0, sizeof(*dom)); + + strncpy(dom->name, name, sizeof(dom->name) - 1); + dom->timeslice_us = timeslice; + dom->preempt_grace_us = preempt_grace; + dom->priv = priv; + + nvs_log_event(sched, NVS_EV_CREATE_DOMAIN, 0U); + + /* + * Now add the domain to the list of domains. If this is the first + * domain we are done. Otherwise use the last pointer to quickly + * append the domain. + */ + dlist->nr++; + if (dlist->domains == NULL) { + dlist->domains = dom; + dlist->last = dom; + return dom; + } + + dlist->last->next = dom; + dlist->last = dom; + + nvs_log(sched, "%s: Domain added", name); + return dom; +} + +/* + * Unlink the domain from our list and clear the last pointer if this was the + * only one remaining. + */ +static void nvs_domain_unlink(struct nvs_sched *sched, + struct nvs_domain *dom) +{ + struct nvs_domain_list *dlist = sched->domain_list; + struct nvs_domain *tmp; + + + if (dlist->domains == dom) { + dlist->domains = dom->next; + + /* + * If dom == last and dom is the first entry, then we have a + * single entry in the list and we need to clear the last + * pointer as well. + */ + if (dom == dlist->last) { + dlist->last = NULL; + } + return; + } + + nvs_domain_for_each(sched, tmp) { + /* + * If tmp's next pointer is dom, then we take tmp and have it + * skip over dom. 
But also don't forget to handle last; if dom + * is last, then last becomes the one before it. + */ + if (dom == tmp->next) { + tmp->next = dom->next; + if (dom == dlist->last) { + dlist->last = tmp; + } + return; + } + } +} + +void nvs_domain_destroy(struct nvs_sched *sched, + struct nvs_domain *dom) +{ + nvs_log_event(sched, NVS_EV_REMOVE_DOMAIN, 0); + + nvs_domain_unlink(sched, dom); + + nvs_memset(dom, 0, sizeof(*dom)); + nvs_free(sched, dom); + + sched->domain_list->nr--; +} + +void nvs_domain_clear_all(struct nvs_sched *sched) +{ + struct nvs_domain_list *dlist = sched->domain_list; + + while (dlist->domains != NULL) { + nvs_domain_destroy(sched, dlist->domains); + } +} + +u32 nvs_domain_count(struct nvs_sched *sched) +{ + return sched->domain_list->nr; +} + +struct nvs_domain *nvs_domain_by_name(struct nvs_sched *sched, const char *name) +{ + struct nvs_domain *domain; + + nvs_domain_for_each(sched, domain) { + if (strcmp(domain->name, name) == 0) { + return domain; + } + } + + return NULL; +} diff --git a/nvsched/src/logging.c b/nvsched/src/logging.c new file mode 100644 index 000000000..1a9ae1bab --- /dev/null +++ b/nvsched/src/logging.c @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2021 NVIDIA Corporation. All rights reserved. + * + * NVIDIA Corporation and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA Corporation is strictly prohibited. 
+ */ + +#include +#include +#include + +#define LOG_INC(value, max) \ + do { \ + (value) += 1U; \ + if ((value) >= (max)) { \ + (value) = 0U; \ + } \ + } while (false) + +static bool nvs_log_full(struct nvs_log_buffer *logger) +{ + u32 updated_put = logger->put; + + LOG_INC(updated_put, logger->entries); + + /* + * If the next put is the same as get, then put has caught up to get, + * and the log is therefore full. + */ + return updated_put == logger->get; +} + +int nvs_log_init(struct nvs_sched *sched) +{ + struct nvs_log_buffer *logger; + + logger = nvs_malloc(sched, sizeof(*logger)); + if (logger == NULL) { + return -ENOMEM; + } + + nvs_memset(logger, 0, sizeof(*logger)); + + logger->ts_offset = nvs_timestamp(); + logger->entries = NVS_LOG_ENTRIES; + logger->events = nvs_malloc(sched, + NVS_LOG_ENTRIES * sizeof(*logger->events)); + if (logger->events == NULL) { + nvs_free(sched, logger); + return -ENOMEM; + } + + nvs_memset(logger->events, 0, + NVS_LOG_ENTRIES * sizeof(*logger->events)); + + sched->log = logger; + + return 0; +} + +void nvs_log_destroy(struct nvs_sched *sched) +{ + nvs_free(sched, sched->log->events); + nvs_free(sched, sched->log); + sched->log = NULL; +} + +void nvs_log_event(struct nvs_sched *sched, enum nvs_event event, u32 data) +{ + struct nvs_log_buffer *logger = sched->log; + struct nvs_log_event *ev; + + nvs_log(sched, "ev: %d", event); + nvs_log(sched, " Starting: G=%05u P=%05u", logger->get, logger->put); + + /* + * If the log fills, just consume the oldest entry like with nvs_log_get(). + * + * TODO: insert a "log overrun" entry, too, so readers will know. 
+ */ + if (nvs_log_full(logger)) { + nvs_log(sched, "Log full; killing entry."); + LOG_INC(logger->get, logger->entries); + } + + ev = &logger->events[logger->put]; + ev->data = data; + ev->event = event; + ev->timestamp = nvs_timestamp() - logger->ts_offset; + + LOG_INC(logger->put, logger->entries); + nvs_log(sched, " New: G=%05u P=%05u", logger->get, logger->put); +} + +void nvs_log_get(struct nvs_sched *sched, struct nvs_log_event *ev) +{ + struct nvs_log_buffer *logger = sched->log; + + nvs_log(sched, "Getting log event."); + nvs_log(sched, " Starting: G=%05u P=%05u", logger->get, logger->put); + + /* + * Check if the log is empty; if so, clear *ev to signal that. + */ + if (logger->get == logger->put) { + ev->event = NVS_EV_NO_EVENT; + nvs_log(sched, " Log empty!"); + return; + } + + *ev = logger->events[logger->get]; + LOG_INC(logger->get, logger->entries); + + nvs_log(sched, " New: G=%05u P=%05u", logger->get, logger->put); +} + +const char *nvs_log_event_string(enum nvs_event ev) +{ + switch (ev) { + case NVS_EV_NO_EVENT: return "No event"; + case NVS_EV_CREATE_SCHED: return "Create scheduler"; + case NVS_EV_CREATE_DOMAIN: return "Create domain"; + case NVS_EV_REMOVE_DOMAIN: return "Remove domain"; + case NVS_EV_MAX: return "Invalid MAX event"; + } + + return "Undefined event"; +} diff --git a/nvsched/src/sched.c b/nvsched/src/sched.c new file mode 100644 index 000000000..7ec4216c5 --- /dev/null +++ b/nvsched/src/sched.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 NVIDIA Corporation. All rights reserved. + * + * NVIDIA Corporation and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA Corporation is strictly prohibited. 
+ */
+
+#include
+#include
+#include
+
+int nvs_sched_create(struct nvs_sched *sched,
+		     struct nvs_sched_ops *ops, void *priv)
+{
+	int err;
+
+	if (ops == NULL) {
+		return -EINVAL;
+	}
+
+	nvs_memset(sched, 0, sizeof(*sched));
+
+	sched->ops = ops;
+	sched->priv = priv;
+
+	sched->domain_list = nvs_malloc(sched, sizeof(*sched->domain_list));
+	if (sched->domain_list == NULL) {
+		return -ENOMEM;
+	}
+
+	nvs_memset(sched->domain_list, 0, sizeof(*sched->domain_list));
+
+	err = nvs_log_init(sched);
+	if (err != 0) {
+		/*
+		 * Don't leave a dangling pointer behind; a subsequent close()
+		 * would otherwise double-free the domain list.
+		 */
+		nvs_free(sched, sched->domain_list);
+		sched->domain_list = NULL;
+		return err;
+	}
+
+	nvs_log_event(sched, NVS_EV_CREATE_SCHED, 0U);
+
+	return 0;
+}
+
+void nvs_sched_close(struct nvs_sched *sched)
+{
+	nvs_domain_clear_all(sched);
+	nvs_free(sched, sched->domain_list);
+	nvs_log_destroy(sched);
+
+	nvs_memset(sched, 0, sizeof(*sched));
+}