// use it like so:
// auto l = co_await LM.lock(co_await this_coro{}, resource_type, resource_id);
// auto l = co_await LM.shared_lock(resource_type, resource_id);
//
// this_coro{} is a stub; using the await_transform() facilities of promise_type{}
// facilitates zero-cost access to the current coroutine handle, which is
// used for exclusive locks, e.g
/*
        struct this_coro final {
                //
        };

        template <typename T>
        inline T &await_transform(T &&t) {
                return t;
        }

        template <typename T>
        inline T &await_transform(const T &t) {
                return t;
        }

        auto await_transform(const this_coro) {
                struct awaitable final {
                        void *p;

                        awaitable(void *ptr)
                            : p{ptr} {
                                //
                        }

                        inline bool await_ready() const noexcept {
                                return true;
                        }

                        inline auto await_resume() const noexcept {
                                return p;
                        }

                        void await_suspend(std::experimental::coroutine_handle<>) {
                                //
                        }
                };

                return awaitable{std::addressof(suspended_for_exclusive_lock)};
        }
*/

// definition
#include <cassert>
#include <cstdint>
#include <experimental/coroutine>
#include <memory>
#include <unordered_map>
#include <utility>
#include "switch_dlist.h" // provides switch_dlist and containerof(); header name assumed, adjust to the codebase

struct livelock_exception final {
        //
};

struct lw_locks_manager final {
        enum {
                trace = true,
        };

        // each locked (shared or exclusively) resource
        // is associated with a resource_lw_lock.
        struct resource_lw_lock final {
                switch_dlist suspended_readers{&suspended_readers, &suspended_readers};
                switch_dlist suspended_writers{&suspended_writers, &suspended_writers};
                int32_t active{0};
                // ^^^^^^
                // -1 : exclusively owned by a coroutine
                //  0 : not locked by any coroutine
                // > 0: locked for read (shared) by (active) coroutines
                void *exclusive_owner{nullptr};
                // ^^^^^^
                // If the lock is exclusively acquired, this identifies
                // the coroutine that holds the exclusive lock.
                // See api_impl_task::promise_task::await_transform()
        };

        std::unordered_map<uint32_t, std::unique_ptr<resource_lw_lock>> map[64];

        struct resource_lw_lock_awaitable final {
                resource_lw_lock *rl;
                std::unordered_map<uint32_t, std::unique_ptr<resource_lw_lock>> *map;
                const uint32_t ent_id;
                const bool for_read;
                switch_dlist l{&l, &l};
                void *contptr; // the suspended coroutine's handle address; set in await_suspend() (see impl.)
                void *coro_ctx; // identifies the coroutine: points to a void * in its promise used to record the lock it is suspended on (see impl.); nullptr for shared (read) requests

                resource_lw_lock_awaitable(const bool fr, decltype(map) m, const uint32_t eid, resource_lw_lock *const l, void *cc)
                    : rl{l}, map{m}, ent_id{eid}, for_read{fr}, coro_ctx{cc} {
                        //
                }

                bool await_ready();

                struct lock_release final {
                        resource_lw_lock *rl;
                        std::unordered_map<uint32_t, std::unique_ptr<resource_lw_lock>> *map;
                        uint32_t ent_id;
                        bool for_read;

                        lock_release()
                            : rl{nullptr} {
                                //
                        }

                        lock_release(const lock_release &) = delete;

                        lock_release(resource_lw_lock *l, decltype(map) m, const uint32_t eid, const bool fr)
                            : rl{l}, map{m}, ent_id{eid}, for_read{fr} {
                                //
                        }

                        lock_release(lock_release &&o)
                            : rl{std::exchange(o.rl, nullptr)}, map{o.map}, ent_id{o.ent_id}, for_read{o.for_read} {
                                //
                        }

                        auto &operator=(lock_release &&o) {
                                // only valid if *this was initialized with the default ctor
                                rl       = std::exchange(o.rl, nullptr);
                                map      = std::exchange(o.map, nullptr);
                                ent_id   = o.ent_id;
                                for_read = o.for_read;
                                return *this;
                        }

                        ~lock_release();
                };

                [[nodiscard]] auto await_resume() {
                        return lock_release{
                            rl,
                            map,
                            ent_id,
                            for_read,
                        };
                }

                void await_suspend(std::experimental::coroutine_handle<> c);
        };

        [[nodiscard]] resource_lw_lock_awaitable shared_lock(const uint8_t entity_type, const uint32_t entity_id);

        [[nodiscard]] resource_lw_lock_awaitable lock(void *ch, const uint8_t entity_type, const uint32_t entity_id);

        using lock_guard = resource_lw_lock_awaitable::lock_release;
};

extern lw_locks_manager LM;

// implementation
lw_locks_manager LM;

lw_locks_manager::resource_lw_lock_awaitable lw_locks_manager::shared_lock(const uint8_t entity_type, const uint32_t entity_id) {
        auto *const m   = &map[entity_type];
        const auto  res = m->emplace(entity_id, nullptr);

        if (res.second) {
                res.first->second.reset(new resource_lw_lock);
        }

        return resource_lw_lock_awaitable{true, m, entity_id, res.first->second.get(), nullptr};
}

lw_locks_manager::resource_lw_lock_awaitable lw_locks_manager::lock(void *ch, const uint8_t entity_type, const uint32_t entity_id) {
        auto *const m   = &map[entity_type];
        const auto  res = m->emplace(entity_id, nullptr);

        if (res.second) {
                res.first->second.reset(new resource_lw_lock);
        }

        return resource_lw_lock_awaitable{false, m, entity_id, res.first->second.get(), ch};
}

bool lw_locks_manager::resource_lw_lock_awaitable::await_ready() {
        if (for_read) {
                if (rl->active != -1) {
                        // not exclusively held: grant the shared lock immediately.
                        // shared_lock() passes no coroutine context (coro_ctx is nullptr),
                        // so there is no suspended-for-exclusive-lock pointer to clear here
                        rl->active++;
                        rl->exclusive_owner = nullptr; // no exclusive owner
                        return true;
                }
        } else if (rl->active == 0) {
                // not locked by anyone: grant the exclusive lock immediately
                rl->active = -1;
                *static_cast<void **>(coro_ctx) = nullptr; // this coro is not suspended waiting for an excl. lock
                rl->exclusive_owner = coro_ctx;            // new exclusive owner
                return true;
        }

#pragma mark detect livelock
        auto cur_owner = rl->exclusive_owner;

        if (cur_owner == coro_ctx) {
                // livelock: attempted to acquire exclusive ownership of a lock
                // which is already exclusively owned by this very same coroutine
                throw livelock_exception{};
        } else if (cur_owner) {
                auto suspended_on = static_cast<resource_lw_lock *>(*static_cast<void **>(cur_owner));

                if (suspended_on) {
                        // there is a coroutine with exclusive ownership of that lock,
                        // and that coroutine is _also_ suspended waiting for exclusive access to another lock (suspended_on).
                        // is the owner of that lock us?
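                        //
                        // i.e. we are detecting the two-coroutine wait-for cycle:
                        //   this coroutine -> wants rl, which is exclusively owned by cur_owner
                        //   cur_owner      -> is suspended waiting for exclusive access to suspended_on
                        // if suspended_on is exclusively owned by this coroutine, neither side can
                        // ever make progress, so we throw instead of suspending forever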
                        auto suspended_on_exclusive_owner = suspended_on->exclusive_owner;

                        // coro_ctx is nullptr for shared (read) requests, which carry no coroutine
                        // identity, so only exclusive requests can form such a cycle
                        if (coro_ctx && suspended_on_exclusive_owner == coro_ctx) {
                                // the owner of the lock we wish to acquire
                                // is suspended waiting for exclusive access to a lock that this coroutine owns
                                throw livelock_exception{};
                        }
                }
        }

        return false;
}

void lw_locks_manager::resource_lw_lock_awaitable::await_suspend(std::experimental::coroutine_handle<> c) {
        contptr = c.address();

        if (for_read) {
                // readers carry no coroutine context (coro_ctx is nullptr), so they are never
                // recorded as suspended waiting for an EXCLUSIVE lock
                rl->suspended_readers.push_back(&l);
        } else {
                *static_cast<void **>(coro_ctx) = rl; // coro suspended for an EXCLUSIVE lock
                rl->suspended_writers.push_back(&l);
        }
}

lw_locks_manager::resource_lw_lock_awaitable::lock_release::~lock_release() {
        enum {
                trace = true,
        };

        if (!rl) {
                return;
        }

        // it was co_await()-ed
        // do we have anyone waiting here?
        if (for_read) {
                rl->active--;
        } else {
                rl->exclusive_owner = nullptr; // there is no exclusive owner for this lock anymore
                rl->active          = 0;
        }

        if (rl->active != -1 && !rl->suspended_readers.empty()) {
                // wake up a suspended reader
                auto next = containerof(resource_lw_lock_awaitable, l, rl->suspended_readers.prev);

                next->l.detach_and_reset();
                rl->active++;
                std::experimental::coroutine_handle<>::from_address(next->contptr).resume();
        } else if (rl->active == 0 && !rl->suspended_writers.empty()) {
                // hand the lock over to a suspended writer
                auto next = containerof(resource_lw_lock_awaitable, l, rl->suspended_writers.prev);

                next->l.detach_and_reset();
                rl->active = -1;
                if (next->coro_ctx) {
                        *static_cast<void **>(next->coro_ctx) = nullptr; // the new owner is no longer suspended waiting for exclusive access to a lock
                }
                rl->exclusive_owner = next->coro_ctx; // new exclusive owner
                std::experimental::coroutine_handle<>::from_address(next->contptr).resume();
        } else if (rl->active == 0) {
                // no one holds this lock and no one is waiting for it; we can get rid of it
                assert(map->count(ent_id));
                map->erase(ent_id);
        }
}
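
// ---------------------------------------------------------------------------
// Usage sketch (illustration only, not part of the manager): a minimal task /
// promise_type providing the this_coro{} / await_transform() plumbing that the
// comment at the top of this file describes. The names `task` and
// `update_resource`, the suspend_never policies and the exact promise layout
// are assumptions made for this sketch; the promise type referenced in the
// original comments is api_impl_task::promise_task. The only load-bearing piece
// is suspended_for_exclusive_lock: LM uses its address as the coroutine's
// identity (coro_ctx) and stores in it the lock the coroutine is suspended on.
struct this_coro final {
        //
};

struct task final {
        struct promise_type final {
                // when non-null: the resource_lw_lock this coroutine is suspended on,
                // waiting for exclusive access (set/cleared by lw_locks_manager)
                void *suspended_for_exclusive_lock{nullptr};

                task get_return_object() {
                        return task{};
                }

                std::experimental::suspend_never initial_suspend() noexcept {
                        return {};
                }

                std::experimental::suspend_never final_suspend() noexcept {
                        return {};
                }

                void return_void() {
                }

                void unhandled_exception() {
                }

                // pass every other awaitable through untouched
                template <typename T>
                T &&await_transform(T &&t) {
                        return std::forward<T>(t);
                }

                // co_await this_coro{}: never suspends, yields the address of
                // suspended_for_exclusive_lock, i.e. this coroutine's identity
                auto await_transform(const this_coro) {
                        struct awaitable final {
                                void *p;

                                bool await_ready() const noexcept {
                                        return true;
                                }

                                void await_suspend(std::experimental::coroutine_handle<>) noexcept {
                                }

                                void *await_resume() const noexcept {
                                        return p;
                                }
                        };

                        return awaitable{std::addressof(suspended_for_exclusive_lock)};
                }
        };
};

// example coroutine: exclusive lock on (type 1, id 10), then a shared lock on
// (type 2, id 20); both are released when the guards go out of scope, at which
// point any suspended waiters get resumed
task update_resource() {
        auto l  = co_await LM.lock(co_await this_coro{}, 1, 10);
        auto sl = co_await LM.shared_lock(2, 20);

        // ... operate on the locked resources ...
        co_return;
}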