// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_

#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

#include "base/check.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/containers/heap_array.h"
#include "base/containers/span.h"
#include "base/debug/alias.h"
#include "base/gtest_prod_util.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/raw_span.h"
#include "base/time/time.h"

namespace base {
namespace sequence_manager {
namespace internal {

// A LazilyDeallocatedDeque specialized for the SequenceManager's usage
// patterns: the queue generally grows while tasks are added, is then drained
// until empty, and the cycle repeats.
//
// The main difference between sequence_manager::LazilyDeallocatedDeque and
// other deques is memory management. For performance (memory allocation isn't
// free) we don't automatically reclaim memory when the queue becomes empty.
// Instead we rely on the surrounding code periodically calling
// MaybeShrinkQueue, ideally when the queue is empty.
//
// We keep track of the maximum recent queue size and rate limit
// MaybeShrinkQueue to avoid unnecessary churn.
//
// NB: this queue isn't thread safe by itself.
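//
// Example usage (an illustrative sketch; Task, MakeTask() and RunTask() are
// placeholders, not part of this class):
//
//   LazilyDeallocatedDeque<Task> queue;
//   queue.push_back(MakeTask());   // The queue grows as tasks are enqueued.
//   while (!queue.empty()) {
//     RunTask(std::move(queue.front()));
//     queue.pop_front();           // Capacity is retained, not freed.
//   }
//   queue.MaybeShrinkQueue();      // Call periodically to reclaim memory.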
template <typename T, TimeTicks (*now_source)() = TimeTicks::Now>
class LazilyDeallocatedDeque {
 public:
  enum {
    // Minimum allocation for a ring. Note that a ring of capacity 4 holds at
    // most 3 elements, since one slot is always left unused.
    kMinimumRingSize = 4,

    // Maximum "wasted" capacity allowed when considering if we should resize
    // the backing store.
    kReclaimThreshold = 16,

    // Used to rate limit how frequently MaybeShrinkQueue actually shrinks the
    // queue.
    kMinimumShrinkIntervalInSeconds = 5
  };

  LazilyDeallocatedDeque() = default;
  LazilyDeallocatedDeque(const LazilyDeallocatedDeque&) = delete;
  LazilyDeallocatedDeque& operator=(const LazilyDeallocatedDeque&) = delete;
  ~LazilyDeallocatedDeque() { clear(); }

  bool empty() const { return size_ == 0; }

  size_t max_size() const { return max_size_; }

  size_t size() const { return size_; }

  size_t capacity() const {
    size_t capacity = 0;
    for (const Ring* iter = head_.get(); iter; iter = iter->next_.get()) {
      capacity += iter->capacity();
    }
    return capacity;
  }

  void clear() {
    while (head_) {
      head_ = std::move(head_->next_);
    }

    tail_ = nullptr;
    size_ = 0;
  }

  // Assumed to be an uncommon operation.
  void push_front(T t) {
    if (!head_) {
      DCHECK(!tail_);
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed, by the minimum amount.
    if (!head_->CanPush()) {
      // TODO(alexclarke): Remove once we've understood the OOMs.
      size_t size = size_;
      base::debug::Alias(&size);

      std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(kMinimumRingSize);
      new_ring->next_ = std::move(head_);
      head_ = std::move(new_ring);
    }

    head_->push_front(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }

  // Assumed to be a common operation.
  void push_back(T t) {
    if (!head_) {
      DCHECK(!tail_);
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed.
    if (!tail_->CanPush()) {
      // TODO(alexclarke): Remove once we've understood the OOMs.
      size_t size = size_;
      base::debug::Alias(&size);

      // Doubling the size is a common strategy, but one which can be
      // wasteful, so we use a (somewhat) slower growth curve.
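      // With kMinimumRingSize == 4, successive tail-ring capacities are
      // 4, 8, 14, 23, 36, ... (each new ring roughly 1.5x the previous one).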
      tail_->next_ = std::make_unique<Ring>(2 + tail_->capacity() +
                                            (tail_->capacity() / 2));
      tail_ = tail_->next_.get();
    }

    tail_->push_back(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }

  T& front() LIFETIME_BOUND {
    DCHECK(head_);
    return head_->front();
  }

  const T& front() const LIFETIME_BOUND {
    DCHECK(head_);
    return head_->front();
  }

  T& back() LIFETIME_BOUND {
    DCHECK(tail_);
    return tail_->back();
  }

  const T& back() const LIFETIME_BOUND {
    DCHECK(tail_);
    return tail_->back();
  }

  void pop_front() {
    DCHECK(head_);
    DCHECK(!head_->empty());
    DCHECK(tail_);
    DCHECK_GT(size_, 0u);
    head_->pop_front();

    // If the ring has become empty and we have several rings, remove the head
    // one (which we expect to have lower capacity than the remaining ones).
    if (head_->empty() && head_->next_) {
      head_ = std::move(head_->next_);
    }

    --size_;
  }

  void swap(LazilyDeallocatedDeque& other) {
    std::swap(head_, other.head_);
    std::swap(tail_, other.tail_);
    std::swap(size_, other.size_);
    std::swap(max_size_, other.max_size_);
    std::swap(next_resize_time_, other.next_resize_time_);
  }

  void MaybeShrinkQueue() {
    if (!tail_)
      return;

    DCHECK_GE(max_size_, size_);

    // Rate limit how often we shrink the queue because it's somewhat expensive.
    TimeTicks current_time = now_source();
    if (current_time < next_resize_time_)
      return;

    // The Ring always leaves one slot unused, so we need one more slot than
    // the number of stored elements.
    size_t new_capacity = max_size_ + 1;
    if (new_capacity < kMinimumRingSize)
      new_capacity = kMinimumRingSize;

    // Reset |max_size_| so that, unless usage spikes again, we will consider
    // reclaiming the excess capacity next time.
    max_size_ = size_;

    // Only realloc if the current capacity is sufficiently greater than the
    // observed maximum size for the previous period.
    if (new_capacity + kReclaimThreshold >= capacity())
      return;

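    // Worked example: with size_ == 2, max_size_ == 10 and capacity() == 40,
    // new_capacity is 11; 11 + kReclaimThreshold (16) == 27 < 40, so the
    // backing store is shrunk to 11 slots.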
    SetCapacity(new_capacity);
    next_resize_time_ = current_time + Seconds(kMinimumShrinkIntervalInSeconds);
  }

  void SetCapacity(size_t new_capacity) {
    std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(new_capacity);

    DCHECK_GE(new_capacity, size_ + 1);

    // Preserve |size_|, which counts down to zero in the while loop below.
    size_t real_size = size_;

    while (!empty()) {
      DCHECK(new_ring->CanPush());
      new_ring->push_back(std::move(head_->front()));
      pop_front();
    }

    size_ = real_size;

    DCHECK_EQ(head_.get(), tail_);
    head_ = std::move(new_ring);
    tail_ = head_.get();
  }

 private:
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushFront);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushBack);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingCanPush);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushPopPushPop);

  struct Ring {
    explicit Ring(size_t capacity) {
      DCHECK_GE(capacity, kMinimumRingSize);
      std::tie(backing_store_, data_) = AlignedUninitCharArray<T>(capacity);
    }
    Ring(const Ring&) = delete;
    Ring& operator=(const Ring&) = delete;
    ~Ring() {
      while (!empty()) {
        pop_front();
      }
    }

    bool empty() const { return back_index_ == before_front_index_; }

    size_t capacity() const { return data_.size(); }

    bool CanPush() const {
      return before_front_index_ != CircularIncrement(back_index_);
    }

    void push_front(T&& t) {
      // Mustn't appear to become empty.
      CHECK_NE(CircularDecrement(before_front_index_), back_index_);
      std::construct_at(data_.get_at(before_front_index_), std::move(t));
      before_front_index_ = CircularDecrement(before_front_index_);
    }

    void push_back(T&& t) {
      back_index_ = CircularIncrement(back_index_);
      CHECK(!empty());  // Mustn't appear to become empty.
      std::construct_at(data_.get_at(back_index_), std::move(t));
    }

    void pop_front() {
      CHECK(!empty());
      before_front_index_ = CircularIncrement(before_front_index_);
      data_[before_front_index_].~T();
    }

    T& front() LIFETIME_BOUND {
      CHECK(!empty());
      return data_[CircularIncrement(before_front_index_)];
    }

    const T& front() const LIFETIME_BOUND {
      CHECK(!empty());
      return data_[CircularIncrement(before_front_index_)];
    }

    T& back() LIFETIME_BOUND {
      CHECK(!empty());
      return data_[back_index_];
    }

    const T& back() const LIFETIME_BOUND {
      CHECK(!empty());
      return data_[back_index_];
    }

    size_t CircularDecrement(size_t index) const {
      if (index == 0)
        return capacity() - 1;
      return index - 1;
    }

    size_t CircularIncrement(size_t index) const {
      CHECK_LT(index, capacity());
      ++index;
      if (index == capacity()) {
        return 0;
      }
      return index;
    }

    AlignedHeapArray<char> backing_store_;
    raw_span<T> data_;
    // Indices into `data_` for one-before-the-first element and the last
    // element. The back_index_ may be less than before_front_index_ if the
    // elements wrap around the back of the array. If they are equal, then the
    // Ring is empty.
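    //
    // For example (illustrative), a Ring of capacity 8 holding A, B, C, with
    // before_front_index_ == 1 and back_index_ == 4:
    //
    //   index:  0  1  2  3  4  5  6  7
    //   data:   .  .  A  B  C  .  .  .
    //
    // front() returns the element at index 2 (A), back() the element at
    // index 4 (C), and the slot at before_front_index_ is always unused.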
    size_t before_front_index_ = 0;
    size_t back_index_ = 0;
    std::unique_ptr<Ring> next_ = nullptr;
  };

 public:
  class Iterator {
   public:
    using value_type = T;
    using pointer = const T*;
    using reference = const T&;

    const T* operator->() const { return &ring_->data_[index_]; }
    const T& operator*() const { return ring_->data_[index_]; }

    Iterator& operator++() {
      if (index_ == ring_->back_index_) {
        ring_ = ring_->next_.get();
        index_ =
            ring_ ? ring_->CircularIncrement(ring_->before_front_index_) : 0;
      } else {
        index_ = ring_->CircularIncrement(index_);
      }
      return *this;
    }

    operator bool() const { return !!ring_; }

   private:
    explicit Iterator(const Ring* ring) {
      if (!ring || ring->empty()) {
        ring_ = nullptr;
        index_ = 0;
        return;
      }

      ring_ = ring;
      index_ = ring_->CircularIncrement(ring->before_front_index_);
    }

    raw_ptr<const Ring> ring_;
    size_t index_;

    friend class LazilyDeallocatedDeque;
  };

  Iterator begin() const { return Iterator(head_.get()); }

  Iterator end() const { return Iterator(nullptr); }
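
  // NOTE: Iterator defines no operator== or operator!=, so iterate via its
  // boolean conversion rather than by comparing against end(). An
  // illustrative sketch (Process() is a placeholder):
  //
  //   for (auto it = deque.begin(); it; ++it)
  //     Process(*it);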

 private:
  // We maintain a list of Ring buffers, to enable us to grow without copying,
  // but most of the time we aim to have only one active Ring.
  std::unique_ptr<Ring> head_;

  // `tail_` is not a raw_ptr<...> for performance reasons (based on analysis of
  // sampling profiler data and tab_search:top100:2020).
  RAW_PTR_EXCLUSION Ring* tail_ = nullptr;

  size_t size_ = 0;
  size_t max_size_ = 0;
  TimeTicks next_resize_time_;
};

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base

#endif  // BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_