/*
 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/service/jfrPostBox.hpp"
#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
#include "jfr/recorder/storage/jfrStorage.hpp"
#include "jfr/recorder/storage/jfrStorageControl.hpp"
#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
#include "jfr/utilities/jfrIterator.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/writers/jfrNativeEventWriter.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"

typedef JfrStorage::Buffer* BufferPtr;

static JfrStorage* _instance = NULL;
static JfrStorageControl* _control;

JfrStorage& JfrStorage::instance() {
  return *_instance;
}

JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) {
  assert(_instance == NULL, "invariant");
  _instance = new JfrStorage(chunkwriter, post_box);
  return _instance;
}

void JfrStorage::destroy() {
  if (_instance != NULL) {
    delete _instance;
    _instance = NULL;
  }
}

JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) :
  _control(NULL),
  _global_mspace(NULL),
  _thread_local_mspace(NULL),
  _transient_mspace(NULL),
  _age_mspace(NULL),
  _chunkwriter(chunkwriter),
  _post_box(post_box) {}

JfrStorage::~JfrStorage() {
  if (_control != NULL) {
    delete _control;
  }
  if (_global_mspace != NULL) {
    delete _global_mspace;
  }
  if (_thread_local_mspace != NULL) {
    delete _thread_local_mspace;
  }
  if (_transient_mspace != NULL) {
    delete _transient_mspace;
  }
  if (_age_mspace != NULL) {
    delete _age_mspace;
  }
  _instance = NULL;
}

static const size_t in_memory_discard_threshold_delta = 2; // start to discard data when only this number of free buffers remains
static const size_t unlimited_mspace_size = 0;
static const size_t thread_local_cache_count = 8;
static const size_t thread_local_scavenge_threshold = thread_local_cache_count / 2;
static const size_t transient_buffer_size_multiplier = 8; // multiplier of the thread local buffer size

template <typename Mspace>
static Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrStorage* storage_instance) {
  Mspace* mspace = new Mspace(buffer_size, limit, cache_count, storage_instance);
  if (mspace != NULL) {
    mspace->initialize();
  }
  return mspace;
}

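// The storage is organized as a set of memory spaces (mspaces):
// - a global mspace holding a fixed number of buffers shared by all threads,
// - a thread local mspace serving per-thread buffers,
// - a transient mspace for temporary, dynamically allocated buffers, and
// - an age mspace whose nodes track retired ("full") buffers in age order.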
bool JfrStorage::initialize() {
  assert(_control == NULL, "invariant");
  assert(_global_mspace == NULL, "invariant");
  assert(_thread_local_mspace == NULL, "invariant");
  assert(_transient_mspace == NULL, "invariant");
  assert(_age_mspace == NULL, "invariant");

  const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
  assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
  const size_t memory_size = (size_t)JfrOptionSet::memory_size();
  const size_t global_buffer_size = (size_t)JfrOptionSet::global_buffer_size();
  const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();

  _control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta);
  if (_control == NULL) {
    return false;
  }
  _global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size, memory_size, num_global_buffers, this);
  if (_global_mspace == NULL) {
    return false;
  }
  _thread_local_mspace = create_mspace<JfrThreadLocalMspace>(thread_buffer_size, unlimited_mspace_size, thread_local_cache_count, this);
  if (_thread_local_mspace == NULL) {
    return false;
  }
  _transient_mspace = create_mspace<JfrStorageMspace>(thread_buffer_size * transient_buffer_size_multiplier, unlimited_mspace_size, 0, this);
  if (_transient_mspace == NULL) {
    return false;
  }
  _age_mspace = create_mspace<JfrStorageAgeMspace>(0 /* no extra size except header */, unlimited_mspace_size, num_global_buffers, this);
  if (_age_mspace == NULL) {
    return false;
  }
  control().set_scavenge_threshold(thread_local_scavenge_threshold);
  return true;
}

JfrStorageControl& JfrStorage::control() {
  return *instance()._control;
}

static void log_allocation_failure(const char* msg, size_t size) {
  if (LogJFR) tty->print_cr("Unable to allocate " SIZE_FORMAT " bytes of %s.", size, msg);
}

BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
  BufferPtr buffer = mspace_get_to_full(size, instance()._thread_local_mspace, thread);
  if (buffer == NULL) {
    log_allocation_failure("thread local memory", size);
    return NULL;
  }
  assert(buffer->acquired_by_self(), "invariant");
  return buffer;
}

BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
  BufferPtr buffer = mspace_allocate_transient_lease_to_full(size, instance()._transient_mspace, thread);
  if (buffer == NULL) {
    log_allocation_failure("transient memory", size);
    return NULL;
  }
  assert(buffer->acquired_by_self(), "invariant");
  assert(buffer->transient(), "invariant");
  assert(buffer->lease(), "invariant");
  return buffer;
}

static BufferPtr get_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
  while (true) {
    BufferPtr t = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
    if (t == NULL && storage_instance.control().should_discard()) {
      storage_instance.discard_oldest(thread);
      continue;
    }
    return t;
  }
}

static BufferPtr get_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
  while (true) {
    BufferPtr t = mspace_get_free_with_retry(size, mspace, retry_count, thread);
    if (t == NULL && storage_instance.control().should_discard()) {
      storage_instance.discard_oldest(thread);
      continue;
    }
    return t;
  }
}

static const size_t lease_retry = 10;

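// A "large" request is preferably served by leasing a buffer from the global
// mspace, provided the request fits in a global buffer and the lease cap has
// not been reached; otherwise it falls back to a transient, dynamically
// allocated buffer.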
BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
  JfrStorage& storage_instance = instance();
  const size_t max_elem_size = storage_instance._global_mspace->min_elem_size(); // min is also max
  // if not too large and capacity is still available, ask for a lease from the global system
  if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
    BufferPtr const buffer = get_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
    if (buffer != NULL) {
      assert(buffer->acquired_by_self(), "invariant");
      assert(!buffer->transient(), "invariant");
      assert(buffer->lease(), "invariant");
      storage_instance.control().increment_leased();
      return buffer;
    }
  }
  return acquire_transient(size, thread);
}

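// The data loss event is written directly into the (now emptied) buffer whose
// unflushed contents could not be promoted.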
static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->empty(), "invariant");
  const u8 total_data_loss = thread->jfr_thread_local()->add_data_lost(unflushed_size);
  if (EventDataLoss::is_enabled()) {
    JfrNativeEventWriter writer(buffer, thread);
    writer.write<u8>(EventDataLoss::eventId);
    writer.write(JfrTicks::now());
    writer.write(unflushed_size);
    writer.write(total_data_loss);
  }
}

static void write_data_loss(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->concurrent_reinitialization();
  if (unflushed_size == 0) {
    return;
  }
  write_data_loss_event(buffer, unflushed_size, thread);
}

static const size_t promotion_retry = 100;

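// Flushing a regular (thread local) buffer promotes its unflushed contents
// into a buffer acquired from the global mspace. If no promotion buffer can
// be obtained, the contents are dropped and a data loss event is recorded.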
bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  if (unflushed_size == 0) {
    buffer->concurrent_reinitialization();
    assert(buffer->empty(), "invariant");
    return true;
  }
  BufferPtr const promotion_buffer = get_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
  if (promotion_buffer == NULL) {
    write_data_loss(buffer, thread);
    return false;
  }
  if (!JfrRecorder::is_shutting_down()) {
    assert(promotion_buffer->acquired_by_self(), "invariant");
  }
  assert(promotion_buffer->free_size() >= unflushed_size, "invariant");
  buffer->concurrent_move_and_reinitialize(promotion_buffer, unflushed_size);
  assert(buffer->empty(), "invariant");
  return true;
}

/*
 * 1. If the buffer was a "lease" from the global system, release it back.
 * 2. If the buffer is transient (temporary, dynamically allocated), retire it and register it as full.
 *
 * The buffer is effectively invalidated for the thread post-return,
 * and the caller should take measures to ensure that it is no longer referenced.
 */
void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->lease(), "invariant");
  assert(buffer->acquired_by_self(), "invariant");
  buffer->clear_lease();
  if (buffer->transient()) {
    buffer->set_retired();
    register_full(buffer, thread);
  } else {
    buffer->release();
    control().decrement_leased();
  }
}

static JfrAgeNode* new_age_node(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(age_mspace != NULL, "invariant");
  return mspace_allocate_transient(0, age_mspace, thread);
}

static void log_registration_failure(size_t unflushed_size) {
  if (LogJFR) tty->print_cr("Unable to register a full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
  if (LogJFR) tty->print_cr("Cleared 1 full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
}

static void handle_registration_failure(BufferPtr buffer) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->reinitialize();
  log_registration_failure(unflushed_size);
}

static JfrAgeNode* get_free_age_node(JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  return mspace_get_free_with_detach(0, age_mspace, thread);
}

static bool insert_full_age_node(JfrAgeNode* age_node, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  assert(age_node->retired_buffer()->retired(), "invariant");
  age_mspace->insert_full_head(age_node);
  return true;
}

static bool full_buffer_registration(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, JfrStorageControl& control, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  assert(age_mspace != NULL, "invariant");
  MutexLockerEx lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
  JfrAgeNode* age_node = get_free_age_node(age_mspace, thread);
  if (age_node == NULL) {
    age_node = new_age_node(buffer, age_mspace, thread);
    if (age_node == NULL) {
      return false;
    }
  }
  assert(age_node != NULL, "invariant");
  assert(age_node->acquired_by_self(), "invariant");
  age_node->set_retired_buffer(buffer);
  control.increment_full();
  return insert_full_age_node(age_node, age_mspace, thread);
}

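// A retired buffer is registered as full by attaching it to an age node and
// inserting that node at the head of the age mspace full list, all under
// JfrBuffer_lock. A MSG_FULLBUFFER message may then be posted so that the
// accumulated full buffers get processed.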
void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  if (!full_buffer_registration(buffer, _age_mspace, control(), thread)) {
    handle_registration_failure(buffer);
    buffer->release();
  }
  if (control().should_post_buffer_full_message()) {
    _post_box.post(MSG_FULLBUFFER);
  }
}

void JfrStorage::lock() {
  assert(!JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->lock_without_safepoint_check();
}

void JfrStorage::unlock() {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->unlock();
}

#ifdef ASSERT
bool JfrStorage::is_locked() const {
  return JfrBuffer_lock->owned_by_self();
}
#endif

// don't use buffer on return, it is gone
void JfrStorage::release(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  assert(!buffer->retired(), "invariant");
  if (!buffer->empty()) {
    if (!flush_regular_buffer(buffer, thread)) {
      buffer->concurrent_reinitialization();
    }
  }
  assert(buffer->empty(), "invariant");
  control().increment_dead();
  buffer->release();
  buffer->set_retired();
}

void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  JfrStorage& storage_instance = instance();
  storage_instance.release(buffer, thread);
  if (storage_instance.control().should_scavenge()) {
    storage_instance._post_box.post(MSG_DEADBUFFER);
  }
}

static void log_discard(size_t count, size_t amount, size_t current) {
  assert(count > 0, "invariant");
  if (LogJFR) tty->print_cr("Cleared " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT " bytes.", count, amount);
  if (LogJFR) tty->print_cr("Current number of full buffers " SIZE_FORMAT "", current);
}

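// In-memory discard policy: when the system runs low on free global buffers,
// the oldest full buffer (the tail of the age list) is discarded. try_lock()
// is used so that a thread does not stall if another thread is already
// discarding.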
void JfrStorage::discard_oldest(Thread* thread) {
  if (JfrBuffer_lock->try_lock()) {
    if (!control().should_discard()) {
      // another thread handled it
      JfrBuffer_lock->unlock();
      return;
    }
    const size_t num_full_pre_discard = control().full_count();
    size_t num_full_post_discard = 0;
    size_t discarded_size = 0;
    while (true) {
      JfrAgeNode* const oldest_age_node = _age_mspace->full_tail();
      if (oldest_age_node == NULL) {
        break;
      }
      BufferPtr const buffer = oldest_age_node->retired_buffer();
      assert(buffer->retired(), "invariant");
      discarded_size += buffer->unflushed_size();
      num_full_post_discard = control().decrement_full();
      if (buffer->transient()) {
        mspace_release_full(buffer, _transient_mspace);
        mspace_release_full(oldest_age_node, _age_mspace);
        continue;
      } else {
        mspace_release_full(oldest_age_node, _age_mspace);
        buffer->reinitialize();
        buffer->release(); // publish
        break;
      }
    }
    JfrBuffer_lock->unlock();
    const size_t number_of_discards = num_full_pre_discard - num_full_post_discard;
    if (number_of_discards > 0) {
      log_discard(number_of_discards, discarded_size, num_full_post_discard);
    }
  }
}

#ifdef ASSERT
typedef const BufferPtr ConstBufferPtr;

static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->pos() + used <= cur->end(), "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
}

static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(!cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(req >= used, "invariant");
}

static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
}

static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
  assert(cur != t->jfr_thread_local()->shelved_buffer(), "invariant");
}
#endif // ASSERT

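// Flush entry point: the requested size is extended with the outstanding
// (used but uncommitted) bytes, and the operation is dispatched depending on
// whether the current buffer is a lease ("large") or a regular thread local buffer.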
BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_precondition(cur, used, native, t);)
  const u1* const cur_pos = cur->pos();
  req += used;
  // the requested size now encompasses the outstanding used size
  return cur->lease() ? instance().flush_large(cur, cur_pos, used, req, native, t) :
                        instance().flush_regular(cur, cur_pos, used, req, native, t);
}

BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
  // A flush is needed before memcpy since a non-large buffer is thread stable
  // (thread local). The flush will not modify memory in addresses above pos()
  // which is where the "used / uncommitted" data resides. It is therefore both
  // possible and valid to migrate data after the flush. This is however only
  // the case for stable thread local buffers; it is not the case for large buffers.
  if (!cur->empty()) {
    flush_regular_buffer(cur, t);
  }
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  if (cur->free_size() >= req) {
    // simplest case, no switching of buffers
    if (used > 0) {
      memcpy(cur->pos(), (void*)cur_pos, used);
    }
    assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
    return cur;
  }
  // Going for a "larger-than-regular" buffer.
  // Shelve the current buffer to make room for a temporary lease.
  t->jfr_thread_local()->shelve_buffer(cur);
  return provision_large(cur, cur_pos, used, req, native, t);
}

static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* jfr_thread_local, bool native) {
  assert(buffer != NULL, "invariant");
  if (native) {
    jfr_thread_local->set_native_buffer(buffer);
  } else {
    jfr_thread_local->set_java_buffer(buffer);
  }
  return buffer;
}

static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
  JfrThreadLocal* const tl = t->jfr_thread_local();
  BufferPtr shelved = tl->shelved_buffer();
  assert(shelved != NULL, "invariant");
  tl->shelve_buffer(NULL);
  // restore shelved buffer back as primary
  return store_buffer_to_thread_local(shelved, tl, native);
}

BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
  // Can the "regular" buffer (now shelved) accommodate the requested size?
  BufferPtr shelved = t->jfr_thread_local()->shelved_buffer();
  assert(shelved != NULL, "invariant");
  if (shelved->free_size() >= req) {
    if (req > 0) {
      memcpy(shelved->pos(), (void*)cur_pos, (size_t)used);
    }
    // release and invalidate
    release_large(cur, t);
    return restore_shelved_buffer(native, t);
  }
  // regular too small
  return provision_large(cur, cur_pos, used, req, native, t);
}

static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  if (cur->lease()) {
    storage_instance.release_large(cur, t);
  }
  return restore_shelved_buffer(native, t);
}

// Always returns a non-null buffer.
// If accommodating the large request fails, the shelved buffer is returned
// even though it might be smaller than the requested size.
// The caller needs to check whether the requested size was actually accommodated.
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_provision_large_precondition(cur, used, req, t);)
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  BufferPtr const buffer = acquire_large(req, t);
  if (buffer == NULL) {
    // unable to allocate and serve the request
    return large_fail(cur, native, *this, t);
  }
  // ok managed to acquire a "large" buffer for the requested size
  assert(buffer->free_size() >= req, "invariant");
  assert(buffer->lease(), "invariant");
  // transfer outstanding data
  memcpy(buffer->pos(), (void*)cur_pos, used);
  if (cur->lease()) {
    release_large(cur, t);
    // don't use current anymore, it is gone
  }
  return store_buffer_to_thread_local(buffer, t->jfr_thread_local(), native);
}

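// Write operations are composed from a basic unbuffered write to the chunk
// writer: a mutexed variant, a concurrent variant, and a concurrent variant
// that excludes retired buffers (used below for the thread local mspace).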
typedef UnBufferedWriteToChunk<JfrBuffer> WriteOperation;
typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;
typedef ConcurrentWriteOpExcludeRetired<WriteOperation> ThreadLocalConcurrentWriteOperation;

size_t JfrStorage::write() {
  const size_t full_size_processed = write_full();
  WriteOperation wo(_chunkwriter);
  ThreadLocalConcurrentWriteOperation tlwo(wo);
  process_full_list(tlwo, _thread_local_mspace);
  ConcurrentWriteOperation cwo(wo);
  process_free_list(cwo, _global_mspace);
  return full_size_processed + wo.processed();
}

size_t JfrStorage::write_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // mutexed write mode
  process_full_list(writer, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(writer, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(writer, _global_mspace);
  return wo.processed();
}

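// clear() discards outstanding buffer contents instead of writing them:
// full buffers first, then thread local, transient and global buffers.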
typedef DiscardOp<DefaultDiscarder<JfrStorage::Buffer> > DiscardOperation;
typedef ReleaseOp<JfrStorageMspace> ReleaseOperation;
typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> FullOperation;

size_t JfrStorage::clear() {
  const size_t full_size_processed = clear_full();
  DiscardOperation discarder(concurrent); // concurrent discard mode
  process_full_list(discarder, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(discarder, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(discarder, _global_mspace);
  return full_size_processed + discarder.processed();
}

static void insert_free_age_nodes(JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, JfrAgeNode* tail, size_t count) {
  if (tail != NULL) {
    assert(tail->next() == NULL, "invariant");
    assert(head != NULL, "invariant");
    assert(head->prev() == NULL, "invariant");
    MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
    age_mspace->insert_free_tail(head, tail, count);
  }
}

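// Processes the detached list of age nodes: each retired buffer is handed to
// the processor, transient nodes are deallocated, and the surviving nodes are
// returned to the age mspace free list.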
template <typename Processor>
static void process_age_list(Processor& processor, JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, size_t count) {
  assert(age_mspace != NULL, "invariant");
  assert(head != NULL, "invariant");
  assert(count > 0, "invariant");
  JfrAgeNode* node = head;
  JfrAgeNode* last = NULL;
  while (node != NULL) {
    last = node;
    BufferPtr const buffer = node->retired_buffer();
    assert(buffer != NULL, "invariant");
    assert(buffer->retired(), "invariant");
    processor.process(buffer);
    // at this point, buffer is already live or destroyed
    node->clear_identity();
    JfrAgeNode* const next = (JfrAgeNode*)node->next();
    if (node->transient()) {
      // detach
      last = (JfrAgeNode*)last->prev();
      if (last != NULL) {
        last->set_next(next);
      } else {
        head = next;
      }
      if (next != NULL) {
        next->set_prev(last);
      }
      --count;
      age_mspace->deallocate(node);
    }
    node = next;
  }
  insert_free_age_nodes(age_mspace, head, last, count);
}

template <typename Processor>
static size_t process_full(Processor& processor, JfrStorageControl& control, JfrStorageAgeMspace* age_mspace) {
  assert(age_mspace != NULL, "invariant");
  if (age_mspace->is_full_empty()) {
    // nothing to do
    return 0;
  }
  size_t count;
  JfrAgeNode* head;
  {
    // fetch age list
    MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
    count = age_mspace->full_count();
    head = age_mspace->clear_full();
    control.reset_full();
  }
  assert(head != NULL, "invariant");
  assert(count > 0, "invariant");
  process_age_list(processor, age_mspace, head, count);
  return count;
}

static void log(size_t count, size_t amount, bool clear = false) {
  if (count > 0) {
    if (LogJFR) tty->print_cr("%s " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT " B of data%s",
      clear ? "Discarded" : "Wrote", count, amount, clear ? "." : " to chunk.");
  }
}

// Full writer
// Assumption: processed buffers are retired only, which implies exclusive access.
// MutexedWriter -> ReleaseOp
//
size_t JfrStorage::write_full() {
  assert(_chunkwriter.is_valid(), "invariant");
  Thread* const thread = Thread::current();
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // a retired buffer implies mutexed access
  ReleaseOperation ro(_transient_mspace, thread);
  FullOperation cmd(&writer, &ro);
  const size_t count = process_full(cmd, control(), _age_mspace);
  log(count, writer.processed());
  return writer.processed();
}

size_t JfrStorage::clear_full() {
  DiscardOperation discarder(mutexed); // a retired buffer implies mutexed access
  const size_t count = process_full(discarder, control(), _age_mspace);
  log(count, discarder.processed(), true);
  return discarder.processed();
}

static void scavenge_log(size_t count, size_t amount, size_t current) {
  if (count > 0) {
    if (LogJFR) tty->print_cr("Released " SIZE_FORMAT " dead buffer(s) of " SIZE_FORMAT " B of data.", count, amount);
    if (LogJFR) tty->print_cr("Current number of dead buffers " SIZE_FORMAT "", current);
  }
}

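// The scavenger reclaims "dead" thread local buffers, i.e. buffers that were
// retired when their owning thread released them (see JfrStorage::release).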
template <typename Mspace>
class Scavenger {
private:
  JfrStorageControl& _control;
  Mspace* _mspace;
  size_t _count;
  size_t _amount;
public:
  typedef typename Mspace::Type Type;
  Scavenger(JfrStorageControl& control, Mspace* mspace) : _control(control), _mspace(mspace), _count(0), _amount(0) {}
  bool process(Type* t) {
    if (t->retired()) {
      assert(!t->transient(), "invariant");
      assert(!t->lease(), "invariant");
      assert(t->empty(), "invariant");
      assert(t->identity() == NULL, "invariant");
      ++_count;
      _amount += t->total_size();
      t->clear_retired();
      _control.decrement_dead();
      mspace_release_full_critical(t, _mspace);
    }
    return true;
  }
  size_t processed() const { return _count; }
  size_t amount() const { return _amount; }
};

size_t JfrStorage::scavenge() {
  JfrStorageControl& ctrl = control();
  if (ctrl.dead_count() == 0) {
    return 0;
  }
  Scavenger<JfrThreadLocalMspace> scavenger(ctrl, _thread_local_mspace);
  process_full_list(scavenger, _thread_local_mspace);
  scavenge_log(scavenger.processed(), scavenger.amount(), ctrl.dead_count());
  return scavenger.processed();
}