Honeycomb  0.1
Component-Model Framework
UnorderedMap.h
// Honeycomb, Copyright (C) 2015 NewGamePlus Inc. Distributed under the Boost Software License v1.0.
#pragma once

namespace honey { namespace lockfree
{

/// Lock-free unordered map. Uses an auto-expanding freelist allocator so memory is only reclaimed upon destruction.
template<class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
class UnorderedMap : mt::NoCopy
{
    typedef typename FreeList<T>::Handle Handle;
    typedef typename FreeList<T>::TaggedHandle TaggedHandle;

    struct MarkedHandle : TaggedHandle
    {
        using typename TaggedHandle::Int;
        using TaggedHandle::tag;

        MarkedHandle() = default;
        MarkedHandle(Handle handle, Int tag, bool mark) : TaggedHandle(handle, (tag<<1) | mark) {}

        Int nextTag() const { return (tag>>1)+1; }
        bool mark() const { return tag & true; }
        void mark(bool b) { tag = (tag & (~Int(0)<<1)) | b; }
    };
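
    //The tag inherited from TaggedHandle does double duty here: its least significant bit holds the
    //logical-deletion mark and the remaining bits form the ABA counter. That is why the constructor
    //shifts the tag left before or-ing in the mark, nextTag() shifts right before incrementing, and
    //mark(bool) only touches the low bit.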

    struct Node
    {
        //init without overwriting previous tag
        template<class Pair>
        Node(Pair&& pair) :
            key(forward<typename Pair::first_type>(pair.first)),
            soKey(0),
            val(forward<typename Pair::second_type>(pair.second))
        {
            next.store(MarkedHandle(nullptr, next.load(atomic::Order::relaxed).nextTag(), false), atomic::Order::relaxed);
        }

        Key key;
        szt soKey;
        T val;
        Atomic<MarkedHandle> next;
    };

    typedef FreeList<Node> FreeList;

public:
    typedef T value_type;

    UnorderedMap(szt capacity = 0, const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual()) :
        _freeList(capacity),
        _hash(hash),
        _equal(equal),
        _segmentCount(0),
        _bucketCount(0),
        _size(0),
        _maxLoadFactor(4) { expand(true); }

    /// Ensure that enough storage is allocated for a number of elements
    void reserve(szt capacity) { _freeList.reserve(capacity); }
    /// The number of elements for which storage is allocated
    szt capacity() const { return _freeList.capacity(); }

    /// Insert new key-value pair into the map. Returns true on success, false if an element with the key already exists.
    template<class Pair>
    bool insert(Pair&& pair)
    {
        Node* node = _freeList.construct(forward<Pair>(pair));
        szt hash = _hash(node->key);
        node->soKey = BitOp::reverse(hash) | true; //regular key has LSB set
        auto& bucket = getBucket(hash % _bucketCount);
        if (!get<0>(list_insert(bucket, *node))) { _freeList.destroy(node); return false; }
        ++_size;
        if (load_factor() > max_load_factor()) expand();
        return true;
    }
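
    //How soKey works: the list underlying the whole table is kept sorted by the bit-reversed hash
    //("split order"). Since the bucket count is always a power of two, reversing the hash moves the
    //low-order bits that select the bucket (hash % _bucketCount) to the top of the sort key, so the
    //elements of each bucket form one contiguous run, and doubling the bucket count later splits
    //every run in place without moving a single node. The LSB distinguishes regular elements from
    //bucket sentinels, whose soKey has the LSB cleared so they sort just before their run.
    //Illustration with 4 buckets and an 8-bit hash: hash 0b00000110 belongs to bucket 2; its soKey
    //0b01100001 sorts after bucket 2's sentinel key 0b01000000 and before bucket 3's sentinel key
    //0b11000000.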

    /// Remove element with key from the map and copy its value into val. Returns true on success, false if not found.
    bool erase(const Key& key, optional<T&> val = optnull)
    {
        szt hash = _hash(key);
        szt soKey = BitOp::reverse(hash) | true; //regular key has LSB set
        auto& bucket = getBucket(hash % _bucketCount);
        if (!list_delete(bucket, key, soKey, val)) return false;
        --_size;
        return true;
    }

    /// Remove all elements
    void clear()
    {
        while (true)
        {
            Node* prev = &getBucket(0);
            MarkedHandle cur = prev->next.load(atomic::Order::acquire);
            while (true)
            {
                if (!cur) return;
                MarkedHandle next = _freeList.deref(cur)->next.load(atomic::Order::acquire);
                auto ckey = _freeList.deref(cur)->key;
                auto csoKey = _freeList.deref(cur)->soKey;
                if (prev->next.load(atomic::Order::acquire) != cur) break; //try again
                if (!next.mark())
                {
                    //regular key has LSB set
                    if (csoKey & true)
                    {
                        erase(ckey);
                        cur = MarkedHandle(next, cur.nextTag(), false);
                    }
                    else
                    {
                        prev = _freeList.deref(cur);
                        cur = next;
                    }
                }
                else
                {
                    auto next_ = MarkedHandle(next, cur.nextTag(), false);
                    if (prev->next.cas(next_, cur))
                    {
                        _freeList.destroy(_freeList.deref(cur));
                        cur = next_;
                    }
                    else
                        break; //try again
                }
            }
        }
    }
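
    //clear() walks the whole split-ordered list starting from bucket 0, which heads the entire
    //structure: regular elements (soKey LSB set) are erased, nodes already marked for deletion are
    //helped along and recycled, and bucket sentinels are skipped and left in place so the table
    //layout survives the clear. Any detected interference restarts the walk from the head.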

    /// Find element with key and copy its value into val. Returns true on success, false if not found.
    bool find(const Key& key, optional<T&> val = optnull) const
    {
        szt hash = _hash(key);
        szt soKey = BitOp::reverse(hash) | true; //regular key has LSB set
        auto& bucket = getBucket(hash % _bucketCount);

        bool found; Node* prev; MarkedHandle cur, next;
        do
        {
            tie(found, prev, cur, next) = list_find(bucket, key, soKey);
            if (!found) return false;
            //ensure the val we read is consistent with prev, otherwise we could end up returning a destroyed value
            if (val) val = _freeList.deref(cur)->val;
        } while (prev->next.load(atomic::Order::acquire) != cur);
        return true;
    }

    /// Return number of elements with matching key (either 0 or 1)
    szt count(const Key& key) const { return find(key); }

    /// Check whether the map does not contain any elements
    bool empty() const { return !_size; }
    /// The number of elements in the map
    szt size() const { return _size; }
    /// The number of buckets. A bucket is a slot in the internal hash table to which elements are assigned based on their key hash.
    szt bucket_count() const { return _bucketCount; }
    /// The current load factor. The load factor is the ratio between the number of elements and the number of buckets.
    float load_factor() const { return _bucketCount ? float(_size) / _bucketCount : _size ? numeral<float>().inf() : 0; }
    /// Get the max load factor. The internal hash table will expand when the load factor is above the max load factor.
    float max_load_factor() const { return _maxLoadFactor; }
    /// Set the max load factor
    void max_load_factor(float f) { _maxLoadFactor = f; expand(); }

private:
    void expand(bool init = false)
    {
        SpinLock::Scoped _(_lock);
        while (init || load_factor() > max_load_factor())
        {
            szt count = _segmentCount ? (1<<_segmentCount) : 2;
            auto buckets = make_unique<Atomic<MarkedHandle>[]>(count);
            std::fill_n(buckets.get(), count, MarkedHandle());
            assert(_segmentCount < _segments.size(), "Max segments reached");
            _segments[_segmentCount] = move(buckets);
            ++_segmentCount;
            _bucketCount += count;
            if (init) break;
        }
    }
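
    //Expansion only appends a new segment of null buckets and bumps the bucket count; existing
    //nodes never move because the list is already sorted in split order. New buckets are linked
    //into the list lazily by getBucket() the first time something hashes to them.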

    Node& getBucket(szt i) const
    {
        szt segment = BitOp::log2Floor(i ? i : 1);
        Node* parent = i ? &getBucket(~(1<<segment) & i) : nullptr; //parent is index with MSB unset
        auto& bucket = _segments[segment][i >= 2 ? i - (1<<segment) : i];
        MarkedHandle old = bucket.load(atomic::Order::acquire);
        if (!old)
        {
            //init bucket
            Node* node = _freeList.construct(make_pair(Key(), T()));
            node->soKey = BitOp::reverse(i) & (~szt(0) << 1); //bucket key has LSB unset
            if (parent)
            {
                //insert into position starting search at parent bucket
                bool inserted; MarkedHandle cur;
                tie(inserted, cur) = list_insert(*parent, *node);
                if (!inserted) _freeList.destroy(node);
                //try to update bucket pointer
                bucket.cas(MarkedHandle(cur, old.nextTag(), false), old);
            }
            else
            {
                //first bucket, try to update bucket pointer
                if (!bucket.cas(MarkedHandle(_freeList.handle(node), old.nextTag(), false), old))
                    _freeList.destroy(node);
            }
            old = bucket.load(atomic::Order::acquire);
        }
        return *_freeList.deref(old);
    }
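
    //Lazy bucket initialization: bucket i's sentinel is spliced into the list by searching from its
    //"parent" bucket, the index obtained by clearing the most significant set bit of i (e.g. the
    //parent of bucket 6 is bucket 2), and the parent is itself initialized recursively if needed.
    //The fixed-size segment table never reallocates, which is what lets concurrent readers index it
    //without taking _lock.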

    //tuple<inserted, cur>
    tuple<bool, MarkedHandle> list_insert(Node& head, Node& node) const
    {
        while (true)
        {
            bool found; Node* prev; MarkedHandle cur, next;
            tie(found, prev, cur, next) = list_find(head, node.key, node.soKey);
            if (found) return make_tuple(false, cur);
            node.next.store(MarkedHandle(cur, node.next.load(atomic::Order::relaxed).nextTag(), false), atomic::Order::relaxed);
            auto node_ = MarkedHandle(_freeList.handle(&node), cur.nextTag(), false);
            if (prev->next.cas(node_, cur)) return make_tuple(true, node_);
        }
    }

    bool list_delete(Node& head, const Key& key, szt soKey, optional<T&> val = optnull)
    {
        bool found; Node* prev; MarkedHandle cur, next;
        do
        {
            tie(found, prev, cur, next) = list_find(head, key, soKey);
            if (!found) return false;
            if (val) val = _freeList.deref(cur)->val; //get val before cas-ing mark otherwise another thread could delete it
        } while (!_freeList.deref(cur)->next.cas(MarkedHandle(next, next.nextTag(), true), next));

        if (prev->next.cas(MarkedHandle(next, cur.nextTag(), false), cur))
            _freeList.destroy(_freeList.deref(cur));
        else
            list_find(head, key, soKey);
        return true;
    }
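
    //Deletion is done in two steps, in the style of Harris/Michael lock-free linked lists: the node
    //is first deleted logically by cas-ing the mark bit into its next handle, then unlinked
    //physically by cas-ing the predecessor past it. If the unlink cas loses a race the node stays
    //marked, and the trailing list_find() call (or any later traversal) finishes the unlink and
    //recycles it.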

    //tuple<found, prev, cur, next>
    tuple<bool, Node*, MarkedHandle, MarkedHandle> list_find(Node& head, const Key& key, szt soKey) const
    {
        while (true)
        {
            Node* prev = &head;
            MarkedHandle cur = prev->next.load(atomic::Order::acquire);
            while (true)
            {
                if (!cur) return make_tuple(false, prev, cur, MarkedHandle());
                MarkedHandle next = _freeList.deref(cur)->next.load(atomic::Order::acquire);
                auto ckey = _freeList.deref(cur)->key;
                auto csoKey = _freeList.deref(cur)->soKey;
                if (prev->next.load(atomic::Order::acquire) != cur) break; //try again
                if (!next.mark())
                {
                    if (csoKey == soKey && _equal(ckey, key)) return make_tuple(true, prev, cur, next);
                    if (csoKey > soKey) return make_tuple(false, prev, cur, next);
                    prev = _freeList.deref(cur);
                    cur = next;
                }
                else
                {
                    auto next_ = MarkedHandle(next, cur.nextTag(), false);
                    if (prev->next.cas(next_, cur))
                    {
                        _freeList.destroy(_freeList.deref(cur));
                        cur = next_;
                    }
                    else
                        break; //try again
                }
            }
        }
    }
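
    //list_find() doubles as the helper that completes pending deletions: whenever it meets a node
    //whose next handle carries the mark bit it tries to unlink and recycle it before moving on.
    //Re-reading prev->next validates the (cur, next, key) snapshot against concurrent writers; on a
    //mismatch or a failed cas the traversal restarts from the head.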

    mutable FreeList _freeList;
    Hash _hash;
    KeyEqual _equal;
    SpinLock _lock;
    array<UniquePtr<Atomic<MarkedHandle>[]>, numeral<uint64>().sizeBits> _segments;
    uint8 _segmentCount;
    Atomic<szt> _bucketCount;
    Atomic<szt> _size;
    Atomic<float> _maxLoadFactor;
};

} }
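
A minimal usage sketch of the public interface above (not part of the original header). The include path, the use of std::make_pair, and binding a local variable to the optional<T&> output parameter are illustrative assumptions; adjust them to your Honeycomb checkout.

#include "Honey/Thread/LockFree/UnorderedMap.h"   //assumed include path

using namespace honey;

int main()
{
    lockfree::UnorderedMap<int, int> map(64);       //pre-allocate storage for 64 elements

    map.insert(std::make_pair(1, 100));             //returns true: key 1 is new
    map.insert(std::make_pair(1, 200));             //returns false: key 1 already present, 100 is kept

    int out = 0;
    bool found = map.find(1, optional<int&>(out));  //assumed: optional<T&> binds the local; copies 100 into out

    map.erase(1);                                   //returns true and removes the element
    return found && map.count(1) == 0 ? 0 : 1;
}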