1 /*
   2  * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jfr/jfrEvents.hpp"
  27 #include "jfr/jni/jfrJavaSupport.hpp"
  28 #include "jfr/recorder/jfrRecorder.hpp"
  29 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
  30 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
  31 #include "jfr/recorder/service/jfrOptionSet.hpp"
  32 #include "jfr/recorder/service/jfrPostBox.hpp"
  33 #include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
  34 #include "jfr/recorder/storage/jfrStorage.hpp"
  35 #include "jfr/recorder/storage/jfrStorageControl.hpp"
  36 #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
  37 #include "jfr/utilities/jfrIterator.hpp"
  38 #include "jfr/utilities/jfrTime.hpp"
  39 #include "jfr/writers/jfrNativeEventWriter.hpp"
  40 #include "logging/log.hpp"
  41 #include "runtime/mutexLocker.hpp"
  42 #include "runtime/orderAccess.hpp"
  43 #include "runtime/os.inline.hpp"
  44 #include "runtime/safepoint.hpp"
  45 #include "runtime/thread.hpp"
  46 
typedef JfrStorage::Buffer* BufferPtr;

// Singleton instance, set in create() and cleared in destroy()/~JfrStorage().
static JfrStorage* _instance = NULL;
// NOTE(review): this file-scope _control shadows the JfrStorage member of the
// same name inside member functions and appears unused here — verify before removing.
static JfrStorageControl* _control;
  51 
// Returns the singleton; only valid between create() and destroy().
JfrStorage& JfrStorage::instance() {
  return *_instance;
}
  55 
// Creates the singleton instance; an instance must not already exist.
// The returned instance still requires a call to initialize().
JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) {
  assert(_instance == NULL, "invariant");
  _instance = new JfrStorage(chunkwriter, post_box);
  return _instance;
}
  61 
  62 void JfrStorage::destroy() {
  63   if (_instance != NULL) {
  64     delete _instance;
  65     _instance = NULL;
  66   }
  67 }
  68 
// All mspaces and the control are created later, in initialize(), not here.
JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) :
  _control(NULL),
  _global_mspace(NULL),
  _thread_local_mspace(NULL),
  _transient_mspace(NULL),
  _age_mspace(NULL),
  _chunkwriter(chunkwriter),
  _post_box(post_box) {}
  77 
  78 JfrStorage::~JfrStorage() {
  79   if (_control != NULL) {
  80     delete _control;
  81   }
  82   if (_global_mspace != NULL) {
  83     delete _global_mspace;
  84   }
  85   if (_thread_local_mspace != NULL) {
  86     delete _thread_local_mspace;
  87   }
  88   if (_transient_mspace != NULL) {
  89     delete _transient_mspace;
  90   }
  91   if (_age_mspace != NULL) {
  92     delete _age_mspace;
  93   }
  94   _instance = NULL;
  95 }
  96 
static const size_t in_memory_discard_threshold_delta = 2; // start discarding data when only this many free buffers remain
static const size_t unlimited_mspace_size = 0;             // 0 == no limit on total mspace size
static const size_t thread_local_cache_count = 8;          // cached free thread-local buffers
static const size_t thread_local_scavenge_threshold = thread_local_cache_count / 2; // dead-buffer count that triggers scavenging
static const size_t transient_buffer_size_multiplier = 8; // against thread local buffer size
 102 
 103 template <typename Mspace>
 104 static Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrStorage* storage_instance) {
 105   Mspace* mspace = new Mspace(buffer_size, limit, cache_count, storage_instance);
 106   if (mspace != NULL) {
 107     mspace->initialize();
 108   }
 109   return mspace;
 110 }
 111 
// Allocates the storage control and the four mspaces (global, thread local,
// transient and age), sized from JfrOptionSet. Returns false on the first
// allocation failure; ~JfrStorage() releases whatever was created.
bool JfrStorage::initialize() {
  assert(_control == NULL, "invariant");
  assert(_global_mspace == NULL, "invariant");
  assert(_thread_local_mspace == NULL, "invariant");
  assert(_transient_mspace == NULL, "invariant");
  assert(_age_mspace == NULL, "invariant");

  const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
  assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
  const size_t memory_size = (size_t)JfrOptionSet::memory_size();
  const size_t global_buffer_size = (size_t)JfrOptionSet::global_buffer_size();
  const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();

  // discard threshold = total buffer count minus the number of free buffers to keep
  _control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta);
  if (_control == NULL) {
    return false;
  }
  _global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size, memory_size, num_global_buffers, this);
  if (_global_mspace == NULL) {
    return false;
  }
  _thread_local_mspace = create_mspace<JfrThreadLocalMspace>(thread_buffer_size, unlimited_mspace_size, thread_local_cache_count, this);
  if (_thread_local_mspace == NULL) {
    return false;
  }
  // transient buffers are a multiple of the thread buffer size, to serve oversized requests
  _transient_mspace = create_mspace<JfrStorageMspace>(thread_buffer_size * transient_buffer_size_multiplier, unlimited_mspace_size, 0, this);
  if (_transient_mspace == NULL) {
    return false;
  }
  _age_mspace = create_mspace<JfrStorageAgeMspace>(0 /* no extra size except header */, unlimited_mspace_size, num_global_buffers, this);
  if (_age_mspace == NULL) {
    return false;
  }
  control().set_scavenge_threshold(thread_local_scavenge_threshold);
  return true;
}
 148 
// Accessor for the storage control of the singleton instance.
JfrStorageControl& JfrStorage::control() {
  return *instance()._control;
}
 152 
// Logs a warning describing a failed buffer allocation of the given size.
static void log_allocation_failure(const char* msg, size_t size) {
  log_warning(jfr)("Unable to allocate " SIZE_FORMAT " bytes of %s.", size, msg);
}
 156 
 157 BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
 158   BufferPtr buffer = mspace_get_to_full(size, instance()._thread_local_mspace, thread);
 159   if (buffer == NULL) {
 160     log_allocation_failure("thread local_memory", size);
 161     return NULL;
 162   }
 163   assert(buffer->acquired_by_self(), "invariant");
 164   return buffer;
 165 }
 166 
// Allocates a transient lease buffer of at least the given size.
// Returns NULL and logs a warning on allocation failure.
BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
  BufferPtr buffer = mspace_allocate_transient_lease_to_full(size, instance()._transient_mspace, thread);
  if (buffer == NULL) {
    log_allocation_failure("transient memory", size);
    return NULL;
  }
  assert(buffer->acquired_by_self(), "invariant");
  assert(buffer->transient(), "invariant");
  assert(buffer->lease(), "invariant");
  return buffer;
}
 178 
 179 static BufferPtr get_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
 180   assert(size <= mspace->min_elem_size(), "invariant");
 181   while (true) {
 182     BufferPtr t = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
 183     if (t == NULL && storage_instance.control().should_discard()) {
 184       storage_instance.discard_oldest(thread);
 185       continue;
 186     }
 187     return t;
 188   }
 189 }
 190 
 191 static BufferPtr get_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
 192   assert(size <= mspace->min_elem_size(), "invariant");
 193   while (true) {
 194     BufferPtr t = mspace_get_free_with_retry(size, mspace, retry_count, thread);
 195     if (t == NULL && storage_instance.control().should_discard()) {
 196       storage_instance.discard_oldest(thread);
 197       continue;
 198     }
 199     return t;
 200   }
 201 }
 202 
static const size_t lease_retry = 10; // max acquisition attempts per round when leasing from the global mspace
 204 
// Acquires a buffer for a request larger than a regular thread-local buffer:
// prefers a lease from the global mspace when the request fits and leasing
// capacity is available, otherwise falls back to a transient allocation.
BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
  JfrStorage& storage_instance = instance();
  const size_t max_elem_size = storage_instance._global_mspace->min_elem_size(); // min is also max
  // if not too large and capacity is still available, ask for a lease from the global system
  if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
    BufferPtr const buffer = get_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
    if (buffer != NULL) {
      assert(buffer->acquired_by_self(), "invariant");
      assert(!buffer->transient(), "invariant");
      assert(buffer->lease(), "invariant");
      storage_instance.control().increment_leased();
      return buffer;
    }
  }
  return acquire_transient(size, thread);
}
 221 
// Accumulates the lost byte count on the thread and, if the DataLoss event is
// enabled, writes the event directly into the (now empty) buffer.
static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->empty(), "invariant");
  const u8 total_data_loss = thread->jfr_thread_local()->add_data_lost(unflushed_size);
  if (EventDataLoss::is_enabled()) {
    JfrNativeEventWriter writer(buffer, thread);
    writer.write<u8>(EventDataLoss::eventId);
    writer.write(JfrTicks::now());
    writer.write(unflushed_size);
    writer.write(total_data_loss);
  }
}
 234 
 235 static void write_data_loss(BufferPtr buffer, Thread* thread) {
 236   assert(buffer != NULL, "invariant");
 237   const size_t unflushed_size = buffer->unflushed_size();
 238   buffer->concurrent_reinitialization();
 239   if (unflushed_size == 0) {
 240     return;
 241   }
 242   write_data_loss_event(buffer, unflushed_size, thread);
 243 }
 244 
static const size_t promotion_retry = 100; // max attempts to acquire a promotion buffer before declaring data loss
 246 
// Promotes the unflushed contents of a regular (thread stable) buffer into a
// global promotion buffer. If no promotion buffer can be obtained, the data
// is dropped and a DataLoss event issued. Returns true if the buffer was
// flushed (or had nothing to flush), false on data loss.
bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  if (unflushed_size == 0) {
    buffer->concurrent_reinitialization();
    assert(buffer->empty(), "invariant");
    return true;
  }

  if (buffer->excluded()) {
    // data of an excluded thread is dropped; reinitialize per the thread's
    // current exclusion state
    const bool thread_is_excluded = thread->jfr_thread_local()->is_excluded();
    buffer->reinitialize(thread_is_excluded);
    assert(buffer->empty(), "invariant");
    if (!thread_is_excluded) {
      // state change from exclusion to inclusion requires a thread checkpoint
      JfrCheckpointManager::write_thread_checkpoint(thread);
    }
    return true;
  }

  BufferPtr const promotion_buffer = get_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
  if (promotion_buffer == NULL) {
    write_data_loss(buffer, thread);
    return false;
  }
  assert(promotion_buffer->acquired_by_self(), "invariant");
  assert(promotion_buffer->free_size() >= unflushed_size, "invariant");
  buffer->concurrent_move_and_reinitialize(promotion_buffer, unflushed_size);
  assert(buffer->empty(), "invariant");
  return true;
}
 280 
 281 /*
 282 * 1. If the buffer was a "lease" from the global system, release back.
 283 * 2. If the buffer is transient (temporal dynamically allocated), retire and register full.
 284 *
 285 * The buffer is effectively invalidated for the thread post-return,
 286 * and the caller should take means to ensure that it is not referenced any longer.
 287 */
void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->lease(), "invariant");
  assert(buffer->acquired_by_self(), "invariant");
  buffer->clear_lease();
  if (buffer->transient()) {
    // transient: retire and hand over to the full list for writing/cleanup
    buffer->set_retired();
    register_full(buffer, thread);
  } else {
    // global lease: publish the buffer back and give back the lease slot
    buffer->release();
    control().decrement_leased();
  }
}
 301 
// Allocates a fresh transient age node. The buffer argument is only used for
// the precondition check; association happens in the caller.
static JfrAgeNode* new_age_node(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(age_mspace != NULL, "invariant");
  return mspace_allocate_transient(0, age_mspace, thread);
}
 307 
// Logs that a full buffer could not be registered and that its contents were cleared.
static void log_registration_failure(size_t unflushed_size) {
  log_warning(jfr)("Unable to register a full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
  log_debug(jfr, system)("Cleared 1 full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
}
 312 
// Registration failed: drop the buffer's unflushed data (data loss) and log it.
static void handle_registration_failure(BufferPtr buffer) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->concurrent_reinitialization();
  log_registration_failure(unflushed_size);
}
 320 
// Detaches a free age node from the age mspace; caller must hold JfrBuffer_lock.
static JfrAgeNode* get_free_age_node(JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  return mspace_get_free_with_detach(0, age_mspace, thread);
}
 325 
// Inserts an age node at the head of the full list; caller must hold JfrBuffer_lock.
static bool insert_full_age_node(JfrAgeNode* age_node, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  assert(age_node != NULL, "invariant");
  assert(age_node->acquired_by_self(), "invariant");
  assert(age_node->retired_buffer()->retired(), "invariant");
  age_node->release(); // drop identity claim on age node when inserting to full list
  assert(age_node->identity() == NULL, "invariant");
  age_mspace->insert_full_head(age_node);
  return true;
}
 336 
// Associates a retired buffer with an age node and publishes it on the full
// list, all under JfrBuffer_lock. Returns false if no age node could be obtained.
static bool full_buffer_registration(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, JfrStorageControl& control, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  assert(age_mspace != NULL, "invariant");
  MutexLocker lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
  JfrAgeNode* age_node = get_free_age_node(age_mspace, thread);
  if (age_node == NULL) {
    // no free node available; allocate a transient one
    age_node = new_age_node(buffer, age_mspace, thread);
    if (age_node == NULL) {
      return false;
    }
  }
  assert(age_node != NULL, "invariant");
  assert(age_node->acquired_by_self(), "invariant");
  age_node->set_retired_buffer(buffer);
  control.increment_full();
  return insert_full_age_node(age_node, age_mspace, thread);
}
 355 
// Registers a retired buffer on the full list; on failure its data is
// discarded. Notifies the recorder thread when the full threshold is reached.
void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  assert(buffer->acquired_by(thread), "invariant");
  if (!full_buffer_registration(buffer, _age_mspace, control(), thread)) {
    handle_registration_failure(buffer);
  }
  if (control().should_post_buffer_full_message()) {
    _post_box.post(MSG_FULLBUFFER);
  }
}
 367 
// Acquires JfrBuffer_lock; must not already be held by this thread.
void JfrStorage::lock() {
  assert(!JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->lock_without_safepoint_check();
}
 372 
// Releases JfrBuffer_lock; must be held by this thread.
void JfrStorage::unlock() {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->unlock();
}
 377 
#ifdef ASSERT
// Debug-only query: does the current thread hold JfrBuffer_lock?
bool JfrStorage::is_locked() const {
  return JfrBuffer_lock->owned_by_self();
}
#endif
 383 
// Flushes any remaining content and retires the buffer, marking it dead so
// the scavenger can reclaim it. Don't use the buffer on return, it is gone.
void JfrStorage::release(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  assert(!buffer->retired(), "invariant");
  if (!buffer->empty()) {
    if (!flush_regular_buffer(buffer, thread)) {
      // flush failed (data loss already recorded); clear the buffer
      buffer->concurrent_reinitialization();
    }
  }
  assert(buffer->empty(), "invariant");
  assert(buffer->identity() != NULL, "invariant");
  control().increment_dead();
  buffer->set_retired();
}
 400 
 401 void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) {
 402   assert(buffer != NULL, "invariant");
 403   JfrStorage& storage_instance = instance();
 404   storage_instance.release(buffer, thread);
 405   if (storage_instance.control().should_scavenge()) {
 406     storage_instance._post_box.post(MSG_DEADBUFFER);
 407   }
 408 }
 409 
 410 static void log_discard(size_t count, size_t amount, size_t current) {
 411   if (log_is_enabled(Debug, jfr, system)) {
 412     assert(count > 0, "invariant");
 413     log_debug(jfr, system)("Cleared " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" bytes.", count, amount);
 414     log_debug(jfr, system)("Current number of full buffers " SIZE_FORMAT "", current);
 415   }
 416 }
 417 
 418 void JfrStorage::discard_oldest(Thread* thread) {
 419   if (JfrBuffer_lock->try_lock()) {
 420     if (!control().should_discard()) {
 421       // another thread handled it
 422       return;
 423     }
 424     const size_t num_full_pre_discard = control().full_count();
 425     size_t num_full_post_discard = 0;
 426     size_t discarded_size = 0;
 427     while (true) {
 428       JfrAgeNode* const oldest_age_node = _age_mspace->full_tail();
 429       if (oldest_age_node == NULL) {
 430         break;
 431       }
 432       assert(oldest_age_node->identity() == NULL, "invariant");
 433       BufferPtr const buffer = oldest_age_node->retired_buffer();
 434       assert(buffer->retired(), "invariant");
 435       discarded_size += buffer->discard();
 436       assert(buffer->unflushed_size() == 0, "invariant");
 437       num_full_post_discard = control().decrement_full();
 438       mspace_release_full(oldest_age_node, _age_mspace);
 439       if (buffer->transient()) {
 440         mspace_release_full(buffer, _transient_mspace);
 441         continue;
 442       }
 443       buffer->reinitialize();
 444       buffer->release(); // publish
 445       break;
 446     }
 447     JfrBuffer_lock->unlock();
 448     const size_t number_of_discards = num_full_pre_discard - num_full_post_discard;
 449     if (number_of_discards > 0) {
 450       log_discard(number_of_discards, discarded_size, num_full_post_discard);
 451     }
 452   }
 453 }
 454 
 455 #ifdef ASSERT
 456 typedef const BufferPtr ConstBufferPtr;
 457 
// Debug check: the buffer is the thread's current primary (native or java)
// buffer and the outstanding used bytes fit within it.
static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->pos() + used <= cur->end(), "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
}
 464 
// Debug check for flush_regular: no buffer is shelved and the current buffer
// is not a lease.
static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(!cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(req >= used, "invariant");
}
 473 
// Debug check for provision_large: the regular buffer must already be shelved.
static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
}
 480 
// Debug check for flush_large: the current buffer is a lease installed as the
// primary buffer, distinct from the shelved regular buffer.
static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->lease(), "invariant");
  assert(!cur->excluded(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
  assert(cur != t->jfr_thread_local()->shelved_buffer(), "invariant");
}
 492 #endif // ASSERT
 493 
// Flush entry point: dispatches to flush_large for leased ("large") buffers
// and flush_regular otherwise. req is extended to also cover the outstanding
// (used but uncommitted) bytes, which are migrated to the returned buffer.
BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_precondition(cur, used, native, t);)
  const u1* const cur_pos = cur->pos();
  req += used;
  // requested size now encompass the outstanding used size
  return cur->lease() ? instance().flush_large(cur, cur_pos, used, req, native, t) :
                          instance().flush_regular(cur, cur_pos, used, req, native, t);
}
 502 
// Flushes a regular (thread stable) buffer. If the freed capacity covers the
// request, the outstanding bytes are migrated in place; otherwise the buffer
// is shelved and a "large" buffer is provisioned instead.
BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
  // A flush is needed before memcpy since a non-large buffer is thread stable
  // (thread local). The flush will not modify memory in addresses above pos()
  // which is where the "used / uncommitted" data resides. It is therefore both
  // possible and valid to migrate data after the flush. This is however only
  // the case for stable thread local buffers; it is not the case for large buffers.
  if (!cur->empty()) {
    flush_regular_buffer(cur, t);
    if (cur->excluded()) {
      return cur;
    }
  }
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  if (cur->free_size() >= req) {
    // simplest case, no switching of buffers
    if (used > 0) {
      memcpy(cur->pos(), (void*)cur_pos, used);
    }
    assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
    return cur;
  }
  // Going for a "larger-than-regular" buffer.
  // Shelve the current buffer to make room for a temporary lease.
  t->jfr_thread_local()->shelve_buffer(cur);
  return provision_large(cur, cur_pos, used, req, native, t);
}
 530 
 531 static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* jfr_thread_local, bool native) {
 532   assert(buffer != NULL, "invariant");
 533   if (native) {
 534     jfr_thread_local->set_native_buffer(buffer);
 535   } else {
 536     jfr_thread_local->set_java_buffer(buffer);
 537   }
 538   return buffer;
 539 }
 540 
// Unshelves the thread's regular buffer and reinstalls it as the primary
// (native or java) buffer. Returns the restored buffer.
static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
  JfrThreadLocal* const tl = t->jfr_thread_local();
  BufferPtr shelved = tl->shelved_buffer();
  assert(shelved != NULL, "invariant");
  tl->shelve_buffer(NULL);
  // restore shelved buffer back as primary
  return store_buffer_to_thread_local(shelved, tl, native);
}
 549 
// Flushes while holding a large (leased) buffer. If the shelved regular
// buffer can accommodate the request, the outstanding bytes migrate back,
// the lease is released and the regular buffer is restored as primary;
// otherwise an even larger lease is provisioned.
BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
  // Can the "regular" buffer (now shelved) accommodate the requested size?
  BufferPtr shelved = t->jfr_thread_local()->shelved_buffer();
  assert(shelved != NULL, "invariant");
  if (shelved->free_size() >= req) {
    if (req > 0) {
      memcpy(shelved->pos(), (void*)cur_pos, (size_t)used);
    }
    // release and invalidate
    release_large(cur, t);
    return restore_shelved_buffer(native, t);
  }
  // regular too small
  return provision_large(cur, cur_pos,  used, req, native, t);
}
 566 
// Large-buffer provisioning failed: release any lease held by cur and fall
// back to the shelved regular buffer.
static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  if (cur->lease()) {
    storage_instance.release_large(cur, t);
  }
  return restore_shelved_buffer(native, t);
}
 575 
// Always returns a non-null buffer.
// If accommodating the large request fails, the shelved buffer is returned
// even though it might be smaller than the requested size.
// Caller needs to ensure if the size was successfully accommodated.
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_provision_large_precondition(cur, used, req, t);)
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  BufferPtr const buffer = acquire_large(req, t);
  if (buffer == NULL) {
    // unable to allocate and serve the request
    return large_fail(cur, native, *this, t);
  }
  // ok managed to acquire a "large" buffer for the requested size
  assert(buffer->free_size() >= req, "invariant");
  assert(buffer->lease(), "invariant");
  // transfer outstanding data from the old buffer into the new lease
  memcpy(buffer->pos(), (void*)cur_pos, used);
  if (cur->lease()) {
    release_large(cur, t);
    // don't use current anymore, it is gone
  }
  return store_buffer_to_thread_local(buffer, t->jfr_thread_local(), native);
}
 599 
// Unbuffered write of buffer contents to the chunk, in mutexed (exclusive)
// or concurrent mode.
typedef UnBufferedWriteToChunk<JfrBuffer> WriteOperation;
typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;

// Predicated variants that filter buffers on their retired/excluded state
// before writing (see NonRetired/NonExcluded usage in write()).
typedef Retired<JfrBuffer, true> NonRetired;
typedef Excluded<JfrBuffer, true> NonExcluded;
typedef CompositeOperation<NonRetired, NonExcluded> BufferPredicate;
typedef PredicatedMutexedWriteOp<WriteOperation, BufferPredicate> ThreadLocalMutexedWriteOperation;
typedef PredicatedConcurrentWriteOp<WriteOperation, BufferPredicate> ThreadLocalConcurrentWriteOperation;
 609 
// Writes registered full buffers, then thread-local and global buffers, to
// the current chunk concurrently with running threads. Returns the total
// number of elements written.
size_t JfrStorage::write() {
  const size_t full_elements = write_full();
  WriteOperation wo(_chunkwriter);
  NonRetired nr;
  NonExcluded ne;
  BufferPredicate bp(&nr, &ne);
  ThreadLocalConcurrentWriteOperation tlwo(wo, bp);
  process_full_list(tlwo, _thread_local_mspace);
  ConcurrentWriteOperation cwo(wo);
  process_free_list(cwo, _global_mspace);
  return full_elements + wo.elements();
}
 622 
// Writes all buffers while the VM is at a safepoint; with no concurrent
// publishers, mutexed (exclusive) write mode suffices.
size_t JfrStorage::write_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // mutexed write mode
  NonRetired nr;
  NonExcluded ne;
  BufferPredicate bp(&nr, &ne);
  ThreadLocalMutexedWriteOperation tlmwo(wo, bp);
  process_full_list(tlmwo, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(writer, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(writer, _global_mspace);
  return wo.elements();
}
 638 
// Discard drops buffer contents; Release returns buffers to their mspace.
// FullOperation combines the two: write a full buffer, then release it.
typedef DiscardOp<DefaultDiscarder<JfrStorage::Buffer> > DiscardOperation;
typedef ReleaseOp<JfrStorageMspace> ReleaseOperation;
typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> FullOperation;
 642 
// Discards all buffered data (full list, thread-local, transient and global
// buffers) without writing it. Returns the number of discarded elements.
size_t JfrStorage::clear() {
  const size_t full_elements = clear_full();
  DiscardOperation discarder(concurrent); // concurrent discard mode
  process_full_list(discarder, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(discarder, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(discarder, _global_mspace);
  return full_elements + discarder.elements();
}
 653 
 654 static void insert_free_age_nodes(JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, JfrAgeNode* tail, size_t count) {
 655   if (tail != NULL) {
 656     assert(tail->next() == NULL, "invariant");
 657     assert(head != NULL, "invariant");
 658     assert(head->prev() == NULL, "invariant");
 659     MutexLocker buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
 660     age_mspace->insert_free_tail(head, tail, count);
 661   }
 662 }
 663 
// Runs the processor over each retired buffer on the (already detached) age
// list. Transient age nodes are unlinked and deallocated along the way; the
// remaining nodes are returned to the free list afterwards.
template <typename Processor>
static void process_age_list(Processor& processor, JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, size_t count) {
  assert(age_mspace != NULL, "invariant");
  assert(head != NULL, "invariant");
  assert(count > 0, "invariant");
  JfrAgeNode* node = head;
  JfrAgeNode* last = NULL; // trails node: last node kept on the list so far
  while (node != NULL) {
    last = node;
    assert(node->identity() == NULL, "invariant");
    BufferPtr const buffer = node->retired_buffer();
    assert(buffer != NULL, "invariant");
    assert(buffer->retired(), "invariant");
    processor.process(buffer);
    // at this point, buffer is already live or destroyed
    JfrAgeNode* const next = (JfrAgeNode*)node->next();
    if (node->transient()) {
      // detach the transient node from the chain before deallocating it
      last = (JfrAgeNode*)last->prev();
      if (last != NULL) {
        last->set_next(next);
      } else {
        head = next;
      }
      if (next != NULL) {
        next->set_prev(last);
      }
      --count;
      age_mspace->deallocate(node);
    }
    node = next;
  }
  insert_free_age_nodes(age_mspace, head, last, count);
}
 698 
// Claims the entire full list (under JfrBuffer_lock), resets the full count,
// then processes the detached list outside the lock. Returns the number of
// age nodes fetched.
template <typename Processor>
static size_t process_full(Processor& processor, JfrStorageControl& control, JfrStorageAgeMspace* age_mspace) {
  assert(age_mspace != NULL, "invariant");
  if (age_mspace->is_full_empty()) {
    // nothing to do
    return 0;
  }
  size_t count;
  JfrAgeNode* head;
  {
    // fetch age list
    MutexLocker buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
    count = age_mspace->full_count();
    head = age_mspace->clear_full();
    control.reset_full();
  }
  assert(head != NULL, "invariant");
  assert(count > 0, "invariant");
  process_age_list(processor, age_mspace, head, count);
  return count;
}
 720 
 721 static void log(size_t count, size_t amount, bool clear = false) {
 722   if (log_is_enabled(Debug, jfr, system)) {
 723     if (count > 0) {
 724       log_debug(jfr, system)("%s " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" B of data%s",
 725         clear ? "Discarded" : "Wrote", count, amount, clear ? "." : " to chunk.");
 726     }
 727   }
 728 }
 729 
// full writer
// Assumption is retired only; exclusive access
// MutexedWriter -> ReleaseOp
//
// Writes all registered full buffers to the chunk and releases transient ones
// back to their mspace. Returns the number of full buffers processed.
size_t JfrStorage::write_full() {
  assert(_chunkwriter.is_valid(), "invariant");
  Thread* const thread = Thread::current();
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // a retired buffer implies mutexed access
  ReleaseOperation ro(_transient_mspace, thread);
  FullOperation cmd(&writer, &ro);
  const size_t count = process_full(cmd, control(), _age_mspace);
  if (0 == count) {
    assert(0 == writer.elements(), "invariant");
    return 0;
  }
  const size_t size = writer.size();
  log(count, size);
  return count;
}
 750 
// Discards all registered full buffers without writing them.
// Returns the number of full buffers discarded.
size_t JfrStorage::clear_full() {
  DiscardOperation discarder(mutexed); // a retired buffer implies mutexed access
  const size_t count = process_full(discarder, control(), _age_mspace);
  if (0 == count) {
    assert(0 == discarder.elements(), "invariant");
    return 0;
  }
  const size_t size = discarder.size();
  log(count, size, true);
  return count;
}
 762 
 763 static void scavenge_log(size_t count, size_t amount, size_t current) {
 764   if (count > 0) {
 765     if (log_is_enabled(Debug, jfr, system)) {
 766       log_debug(jfr, system)("Released " SIZE_FORMAT " dead buffer(s) of " SIZE_FORMAT" B of data.", count, amount);
 767       log_debug(jfr, system)("Current number of dead buffers " SIZE_FORMAT "", current);
 768     }
 769   }
 770 }
 771 
// Visitor that reclaims dead (retired) thread-local buffers: clears state,
// releases the identity claim and returns the buffer to the mspace free list.
// Tracks the number of buffers and total bytes reclaimed.
template <typename Mspace>
class Scavenger {
private:
  JfrStorageControl& _control;
  Mspace* _mspace;
  size_t _count;   // buffers reclaimed
  size_t _amount;  // total bytes reclaimed
public:
  typedef typename Mspace::Type Type;
  Scavenger(JfrStorageControl& control, Mspace* mspace) : _control(control), _mspace(mspace), _count(0), _amount(0) {}
  // Reclaims t if it is retired; always returns true to continue iteration.
  bool process(Type* t) {
    if (t->retired()) {
      assert(t->identity() != NULL, "invariant");
      assert(t->empty(), "invariant");
      assert(!t->transient(), "invariant");
      assert(!t->lease(), "invariant");
      ++_count;
      _amount += t->total_size();
      if (t->excluded()) {
        t->clear_excluded();
      }
      assert(!t->excluded(), "invariant");
      t->clear_retired();
      t->release();
      _control.decrement_dead();
      mspace_release_full_critical(t, _mspace);
    }
    return true;
  }
  size_t processed() const { return _count; }
  size_t amount() const { return _amount; }
};
 804 
// Reclaims dead (retired) thread-local buffers, if any.
// Returns the number of buffers reclaimed.
size_t JfrStorage::scavenge() {
  JfrStorageControl& ctrl = control();
  if (ctrl.dead_count() == 0) {
    return 0;
  }
  Scavenger<JfrThreadLocalMspace> scavenger(ctrl, _thread_local_mspace);
  process_full_list(scavenger, _thread_local_mspace);
  const size_t count = scavenger.processed();
  if (0 == count) {
    assert(0 == scavenger.amount(), "invariant");
    return 0;
  }
  scavenge_log(count, scavenger.amount(), ctrl.dead_count());
  return count;
}