1 /*
   2  * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jfr/jni/jfrJavaSupport.hpp"
  27 #include "jfr/recorder/jfrRecorder.hpp"
  28 #include "jfr/recorder/access/jfrOptionSet.hpp"
  29 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
  30 #include "jfr/recorder/service/jfrPostBox.hpp"
  31 #include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
  32 #include "jfr/recorder/storage/jfrStorage.hpp"
  33 #include "jfr/recorder/storage/jfrStorageControl.hpp"
  34 #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
  35 #include "jfr/utilities/jfrIterator.hpp"
  36 #include "jfr/utilities/jfrLog.hpp"
  37 
  38 #include "runtime/mutexLocker.hpp"
  39 #include "runtime/orderAccess.inline.hpp"
  40 #include "runtime/safepoint.hpp"
  41 #include "runtime/thread.hpp"
  42 #include "trace/tracing.hpp"
  43 
typedef JfrStorage::Buffer* BufferPtr;

// Process-wide singleton, created by JfrStorage::create() and torn down by destroy().
static JfrStorage* _instance = NULL;
// NOTE(review): this file-scope _control is never assigned in this file; the
// member JfrStorage::_control is what control() dereferences — confirm whether
// this static is dead.
static JfrStorageControl* _control;
  48 
  49 JfrStorage& JfrStorage::instance() {
  50   return *_instance;
  51 }
  52 
  53 JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) {
  54   assert(_instance == NULL, "invariant");
  55   _instance = new JfrStorage(chunkwriter, post_box);
  56   return _instance;
  57 }
  58 
  59 void JfrStorage::destroy() {
  60   if (_instance != NULL) {
  61     delete _instance;
  62     _instance = NULL;
  63   }
  64 }
  65 
// Construction cannot fail: the control object and the mspaces are NULL here
// and are allocated later in initialize().
JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) :
  _control(NULL),
  _global_mspace(NULL),
  _thread_local_mspace(NULL),
  _transient_mspace(NULL),
  _age_mspace(NULL),
  _chunkwriter(chunkwriter),
  _post_box(post_box) {}
  74 
// Releases whatever initialize() managed to allocate (any subset may be NULL
// if initialize() failed partway) and clears the singleton pointer.
JfrStorage::~JfrStorage() {
  if (_control != NULL) {
    delete _control;
  }
  if (_global_mspace != NULL) {
    delete _global_mspace;
  }
  if (_thread_local_mspace != NULL) {
    delete _thread_local_mspace;
  }
  if (_transient_mspace != NULL) {
    delete _transient_mspace;
  }
  if (_age_mspace != NULL) {
    delete _age_mspace;
  }
  _instance = NULL;
}
  93 
// Start discarding data once only this many free global buffers remain.
static const size_t in_memory_discard_threshold_delta = 2;
static const size_t unlimited_mspace_size = 0; // 0 == no size limit on the mspace
static const size_t thread_local_cache_count = 8;
// Scavenge once half of the thread local cache consists of dead buffers.
static const size_t thread_local_scavenge_threshold = thread_local_cache_count / 2;
static const size_t transient_buffer_size_multiplier = 8; // against thread local buffer size
  99 
 100 static JfrStorageMspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrStorage* storage_instance) {
 101   JfrStorageMspace* mspace = new JfrStorageMspace(buffer_size, limit, cache_count, storage_instance);
 102   if (mspace != NULL) {
 103     mspace->initialize();
 104   }
 105   return mspace;
 106 }
 107 
// Allocates the control object and the four mspaces using sizes taken from
// JfrOptionSet. Returns false on the first failure; partially allocated
// members are left for the destructor to reclaim.
bool JfrStorage::initialize() {
  assert(_control == NULL, "invariant");
  assert(_global_mspace == NULL, "invariant");
  assert(_thread_local_mspace == NULL, "invariant");
  assert(_transient_mspace == NULL, "invariant");
  assert(_age_mspace == NULL, "invariant");

  const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
  assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
  const size_t memory_size = (size_t)JfrOptionSet::memory_size();
  const size_t global_buffer_size = (size_t)JfrOptionSet::global_buffer_size();
  const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();

  // discard threshold: begin discarding when only the delta remains free
  _control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta);
  if (_control == NULL) {
    return false;
  }
  _global_mspace = create_mspace(global_buffer_size, memory_size, num_global_buffers, this);
  if (_global_mspace == NULL) {
    return false;
  }
  // thread local and transient mspaces are not bounded in total size
  _thread_local_mspace = create_mspace(thread_buffer_size, unlimited_mspace_size, thread_local_cache_count, this);
  if (_thread_local_mspace == NULL) {
    return false;
  }
  // transient buffers are oversized relative to a thread local buffer
  _transient_mspace = create_mspace(thread_buffer_size * transient_buffer_size_multiplier, unlimited_mspace_size, 0, this);
  if (_transient_mspace == NULL) {
    return false;
  }
  _age_mspace = new JfrStorageAgeMspace(0 /* no extra size except header */, unlimited_mspace_size, num_global_buffers, this);

  if (_age_mspace == NULL || !_age_mspace->initialize()) {
    return false;
  }
  control().set_scavenge_threshold(thread_local_scavenge_threshold);
  return true;
}
 145 
 146 JfrStorageControl& JfrStorage::control() {
 147   return *instance()._control;
 148 }
 149 
// Emits a warning identifying which kind of storage memory could not be allocated.
static void log_allocation_failure(const char* msg, size_t size) {
  log_warning(jfr)("Unable to allocate " SIZE_FORMAT " bytes of %s.", size, msg);
}
 153 
 154 BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
 155   BufferPtr buffer = mspace_get_to_full(size, instance()._thread_local_mspace, thread);
 156   if (buffer == NULL) {
 157     log_allocation_failure("thread local_memory", size);
 158     return NULL;
 159   }
 160   assert(buffer->acquired_by_self(), "invariant");
 161   return buffer;
 162 }
 163 
 164 BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
 165   BufferPtr buffer = mspace_allocate_transient_lease_to_full(size, instance()._transient_mspace, thread);
 166   if (buffer == NULL) {
 167     log_allocation_failure("transient memory", size);
 168     return NULL;
 169   }
 170   assert(buffer->acquired_by_self(), "invariant");
 171   assert(buffer->transient(), "invariant");
 172   assert(buffer->lease(), "invariant");
 173   return buffer;
 174 }
 175 
 176 static BufferPtr get_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
 177   assert(size <= mspace->min_elem_size(), "invariant");
 178   while (true) {
 179     BufferPtr t = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
 180     if (t == NULL && storage_instance.control().should_discard()) {
 181       storage_instance.discard_oldest(thread);
 182       continue;
 183     }
 184     return t;
 185   }
 186 }
 187 
 188 static BufferPtr get_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
 189   assert(size <= mspace->min_elem_size(), "invariant");
 190   while (true) {
 191     BufferPtr t = mspace_get_free_with_retry(size, mspace, retry_count, thread);
 192     if (t == NULL && storage_instance.control().should_discard()) {
 193       storage_instance.discard_oldest(thread);
 194       continue;
 195     }
 196     return t;
 197   }
 198 }
 199 
// Number of attempts when trying to lease a free buffer from the global mspace.
static const size_t lease_retry = 10;
 201 
 202 BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
 203   JfrStorage& storage_instance = instance();
 204   const size_t max_elem_size = storage_instance._global_mspace->min_elem_size(); // min is also max
 205   // if not too large and capacity is still available, ask for a lease from the global system
 206   if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
 207     BufferPtr const buffer = get_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
 208     if (buffer != NULL) {
 209       assert(buffer->acquired_by_self(), "invariant");
 210       assert(!buffer->transient(), "invariant");
 211       assert(buffer->lease(), "invariant");
 212       storage_instance.control().increment_leased();
 213       return buffer;
 214     }
 215   }
 216   return acquire_transient(size, thread);
 217 }
 218 
 219 static void write_data_loss_event(size_t unflushed_size, Thread* thread) {
 220   const u8 total_data_loss = thread->trace_data()->add_data_lost(unflushed_size);
 221   if (EventDataLoss::is_enabled()) {
 222     EventDataLoss dataloss;
 223     dataloss.set_amount(unflushed_size);
 224     dataloss.set_total(total_data_loss);
 225     dataloss.commit();
 226   }
 227 }
 228 
 229 static void write_data_loss(BufferPtr buffer, Thread* thread) {
 230   assert(buffer != NULL, "invariant");
 231   const size_t unflushed_size = buffer->unflushed_size();
 232   buffer->concurrent_reinitialization();
 233   if (unflushed_size == 0) {
 234     return;
 235   }
 236   assert(buffer->empty(), "invariant");
 237   write_data_loss_event(unflushed_size, thread);
 238 }
 239 
// Number of attempts when trying to acquire a promotion buffer from the global mspace.
static const size_t promotion_retry = 100;
 241 
// Promotes the unflushed content of a thread-stable regular buffer into a
// buffer acquired from the global mspace, then reinitializes the buffer.
// Returns false if no promotion buffer could be acquired; in that case the
// outstanding data is dropped and accounted for as a data loss.
bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  if (unflushed_size == 0) {
    // nothing outstanding; just reset the buffer
    buffer->concurrent_reinitialization();
    assert(buffer->empty(), "invariant");
    return true;
  }
  BufferPtr const promotion_buffer = get_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
  if (promotion_buffer == NULL) {
    // promotion failed even after retries/discards; drop and log the data
    write_data_loss(buffer, thread);
    return false;
  }
  assert(promotion_buffer->acquired_by_self(), "invariant");
  assert(promotion_buffer->free_size() >= unflushed_size, "invariant");
  buffer->concurrent_move_and_reinitialize(promotion_buffer, unflushed_size);
  assert(buffer->empty(), "invariant");
  return true;
}
 263 
 264 /*
 265 * 1. If the buffer was a "lease" from the global system, release back.
 266 * 2. If the buffer is transient (temporal dynamically allocated), retire and register full.
 267 *
 268 * The buffer is effectively invalidated for the thread post-return,
 269 * and the caller should take means to ensure that it is not referenced any longer.
 270 */
 271 void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
 272   assert(buffer != NULL, "invariant");
 273   assert(buffer->lease(), "invariant");
 274   assert(buffer->acquired_by_self(), "invariant");
 275   buffer->clear_lease();
 276   if (buffer->transient()) {
 277     buffer->set_retired();
 278     register_full(buffer, thread);
 279   } else {
 280     buffer->release();
 281     control().decrement_leased();
 282   }
 283 }
 284 
 285 static JfrAgeNode* new_age_node(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, Thread* thread) {
 286   assert(buffer != NULL, "invariant");
 287   assert(age_mspace != NULL, "invariant");
 288   return mspace_allocate_transient(0, age_mspace, thread);
 289 }
 290 
// Logs that a full buffer could not be registered and that its content was cleared.
static void log_registration_failure(size_t unflushed_size) {
  log_warning(jfr)("Unable to register a full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
  log_debug(jfr, system)("Cleared 1 full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
}
 295 
 296 static void handle_registration_failure(BufferPtr buffer) {
 297   assert(buffer != NULL, "invariant");
 298   assert(buffer->retired(), "invariant");
 299   const size_t unflushed_size = buffer->unflushed_size();
 300   buffer->reinitialize();
 301   log_registration_failure(unflushed_size);
 302 }
 303 
// Detaches a node from the age mspace free list; caller must hold JfrBuffer_lock.
static JfrAgeNode* get_free_age_node(JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  return mspace_get_free_with_detach(0, age_mspace, thread);
}
 308 
// Inserts an age node at the head of the full list; caller must hold
// JfrBuffer_lock. Always succeeds (returns true) once called.
static bool insert_full_age_node(JfrAgeNode* age_node, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  assert(age_node->retired_buffer()->retired(), "invariant");
  age_mspace->insert_full_head(age_node);
  return true;
}
 315 
 316 static bool full_buffer_registration(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, JfrStorageControl& control, Thread* thread) {
 317   assert(buffer != NULL, "invariant");
 318   assert(buffer->retired(), "invariant");
 319   assert(age_mspace != NULL, "invariant");
 320   MutexLockerEx lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
 321   JfrAgeNode* age_node = get_free_age_node(age_mspace, thread);
 322   if (age_node == NULL) {
 323     age_node = new_age_node(buffer, age_mspace, thread);
 324     if (age_node == NULL) {
 325       return false;
 326     }
 327   }
 328   assert(age_node->acquired_by_self(), "invariant");
 329   assert(age_node != NULL, "invariant");
 330   age_node->set_retired_buffer(buffer);
 331   control.increment_full();
 332   return insert_full_age_node(age_node, age_mspace, thread);
 333 }
 334 
// Publishes a retired buffer on the full list so the recorder service can
// write it out. On registration failure the content is dropped (logged) and
// the buffer released. May post a MSG_FULLBUFFER message to wake the service.
void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  if (!full_buffer_registration(buffer, _age_mspace, control(), thread)) {
    handle_registration_failure(buffer);
    buffer->release();
  }
  if (control().should_post_buffer_full_message()) {
    _post_box.post(MSG_FULLBUFFER);
  }
}
 346 
// Acquires JfrBuffer_lock without a safepoint check; must not already be held.
void JfrStorage::lock() {
  assert(!JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->lock_without_safepoint_check();
}
 351 
// Releases JfrBuffer_lock; must be held by the calling thread.
void JfrStorage::unlock() {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->unlock();
}
 356 
// Retires a regular (thread local) buffer: flushes any outstanding content
// (dropping it on flush failure), accounts the buffer as dead, then releases
// and marks it retired so the scavenger can reclaim it.
// NOTE(review): release() precedes set_retired() here — presumably the
// scavenger keys off retired() last; confirm the ordering is intentional.
// don't use buffer on return, it is gone
void JfrStorage::release(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  assert(!buffer->retired(), "invariant");
  if (!buffer->empty()) {
    if (!flush_regular_buffer(buffer, thread)) {
      buffer->concurrent_reinitialization();
    }
  }
  assert(buffer->empty(), "invariant");
  control().increment_dead();
  buffer->release();
  buffer->set_retired();
}
 373 
 374 void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) {
 375   assert(buffer != NULL, "invariant");
 376   JfrStorage& storage_instance = instance();
 377   storage_instance.release(buffer, thread);
 378   if (storage_instance.control().should_scavenge()) {
 379     storage_instance._post_box.post(MSG_DEADBUFFER);
 380   }
 381 }
 382 
 383 static void log_discard(size_t count, size_t amount, size_t current) {
 384   if (log_is_enabled(Debug, jfr, system)) {
 385     assert(count > 0, "invariant");
 386     log_debug(jfr, system)("Cleared " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" bytes.", count, amount);
 387     log_debug(jfr, system)("Current number of full buffers " SIZE_FORMAT "", current);
 388   }
 389 }
 390 
 391 void JfrStorage::discard_oldest(Thread* thread) {
 392   if (JfrBuffer_lock->try_lock()) {
 393     if (!control().should_discard()) {
 394       // another thread handled it
 395       return;
 396     }
 397     const size_t num_full_pre_discard = control().full_count();
 398     size_t num_full_post_discard = 0;
 399     size_t discarded_size = 0;
 400     while (true) {
 401       JfrAgeNode* const oldest_age_node = _age_mspace->full_tail();
 402       if (oldest_age_node == NULL) {
 403         break;
 404       }
 405       BufferPtr const buffer = oldest_age_node->retired_buffer();
 406       assert(buffer->retired(), "invariant");
 407       discarded_size += buffer->unflushed_size();
 408       num_full_post_discard = control().decrement_full();
 409       if (buffer->transient()) {
 410         mspace_release(buffer, _transient_mspace);
 411         mspace_release(oldest_age_node, _age_mspace);
 412         continue;
 413       } else {
 414         mspace_release(oldest_age_node, _age_mspace);
 415         buffer->reinitialize();
 416         buffer->release(); // pusb
 417         break;
 418       }
 419     }
 420     JfrBuffer_lock->unlock();
 421     const size_t number_of_discards = num_full_pre_discard - num_full_post_discard;
 422     if (number_of_discards > 0) {
 423       log_discard(number_of_discards, discarded_size, num_full_post_discard);
 424     }
 425   }
 426 }
 427 
#ifdef ASSERT
// Note: this aliases Buffer* const (the pointer is const, not the pointee).
typedef const BufferPtr ConstBufferPtr;

// Common precondition for all flush paths: the buffer must be the thread's
// current native or java buffer and the used region must fit within it.
static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->pos() + used <= cur->end(), "invariant");
  assert(native ? t->trace_data()->native_buffer() == cur : t->trace_data()->java_buffer() == cur, "invariant");
}

// Regular flush: no lease, and no buffer may already be shelved.
static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(t->trace_data()->shelved_buffer() == NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(!cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(req >= used, "invariant");
}

// Provisioning a large buffer: the regular buffer must already be shelved.
static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  assert(t->trace_data()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
}

// Large flush: current buffer is a lease distinct from the shelved regular buffer.
static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(native ? t->trace_data()->native_buffer() == cur : t->trace_data()->java_buffer() == cur, "invariant");
  assert(t->trace_data()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
  assert(cur != t->trace_data()->shelved_buffer(), "invariant");
}
#endif // ASSERT
 465 
// Entry point for flushing a thread's current buffer when a write of req more
// bytes (with used bytes outstanding and uncommitted) does not fit.
// Dispatches on whether the current buffer is a lease ("large") or a regular
// thread local buffer. Returns the buffer to continue writing into.
BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_precondition(cur, used, native, t);)
  const u1* const cur_pos = cur->pos();
  req += used;
  // requested size now encompass the outstanding used size
  if (cur->lease()) {
    return instance().flush_large(cur, cur_pos, used, req, native, t);
  }
  // "regular" == !lease
  return instance().flush_regular(cur, cur_pos, used, req, native, t);
}
 477 
// Flushes a regular (thread local) buffer. If the flushed buffer can now hold
// the request, the outstanding bytes are migrated back into it; otherwise the
// buffer is shelved and a larger lease is provisioned.
BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
  // A flush is needed before memcpy since a non-large buffer is thread stable
  // (thread local). The flush will not modify memory in addresses above pos()
  // which is where the "used / uncommitted" data resides. It is therefore both
  // possible and valid to migrate data after the flush. This is however only
  // the case for stable thread local buffers; it is not the case for large buffers.
  if (!cur->empty()) {
    flush_regular_buffer(cur, t);
  }
  assert(t->trace_data()->shelved_buffer() == NULL, "invariant");
  if (cur->free_size() >= req) {
    // simplest case, no switching of buffers
    if (used > 0) {
      // move the outstanding (uncommitted) bytes to the front of the buffer
      memcpy(cur->pos(), (void*)cur_pos, used);
    }
    assert(native ? t->trace_data()->native_buffer() == cur : t->trace_data()->java_buffer() == cur, "invariant");
    return cur;
  }
  // Going for a "larger-than-regular" buffer.
  // Shelve the current buffer to make room for a temporary lease.
  t->trace_data()->shelve_buffer(cur);
  return provision_large(cur, cur_pos, used, req, native, t);
}
 502 
 503 static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadData* trace_data, bool native) {
 504   assert(buffer != NULL, "invariant");
 505   if (native) {
 506     trace_data->set_native_buffer(buffer);
 507   } else {
 508    trace_data->set_java_buffer(buffer);
 509   }
 510   return buffer;
 511 }
 512 
 513 static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
 514   JfrThreadData* const trace_data = t->trace_data();
 515   BufferPtr shelved = trace_data->shelved_buffer();
 516   assert(shelved != NULL, "invariant");
 517   t->trace_data()->shelve_buffer(NULL);
 518   // restore shelved buffer back as primary
 519   return store_buffer_to_thread_local(shelved, trace_data, native);
 520 }
 521 
// Flushes a "large" (leased) buffer. If the shelved regular buffer can absorb
// the request, the outstanding bytes are copied back, the lease is released,
// and the regular buffer is restored; otherwise an even larger lease is provisioned.
BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
  // Can the "regular" buffer (now shelved) accommodate the requested size?
  BufferPtr shelved = t->trace_data()->shelved_buffer();
  assert(shelved != NULL, "invariant");
  if (shelved->free_size() >= req) {
    if (req > 0) {
      // only the outstanding (used) bytes are migrated
      memcpy(shelved->pos(), (void*)cur_pos, (size_t)used);
    }
    // release and invalidate
    release_large(cur, t);
    return restore_shelved_buffer(native, t);
  }
  // regular too small
  return provision_large(cur, cur_pos,  used, req, native, t);
}
 538 
 539 static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) {
 540   assert(cur != NULL, "invariant");
 541   assert(t != NULL, "invariant");
 542   if (cur->lease()) {
 543     storage_instance.release_large(cur, t);
 544   }
 545   return restore_shelved_buffer(native, t);
 546 }
 547 
// Always returns a non-null buffer.
// If accommodating the large request fails, the shelved buffer is returned
// even though it might be smaller than the requested size.
// Caller needs to ensure if the size was successfully accommodated.
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_provision_large_precondition(cur, used, req, t);)
  assert(t->trace_data()->shelved_buffer() != NULL, "invariant");
  BufferPtr const buffer = acquire_large(req, t);
  if (buffer == NULL) {
    // unable to allocate and serve the request
    return large_fail(cur, native, *this, t);
  }
  // ok managed to acquire a "large" buffer for the requested size
  assert(buffer->free_size() >= req, "invariant");
  assert(buffer->lease(), "invariant");
  // transfer outstanding data
  memcpy(buffer->pos(), (void*)cur_pos, used);
  if (cur->lease()) {
    release_large(cur, t);
    // don't use current anymore, it is gone
  }
  // the new large buffer becomes the thread's primary buffer
  return store_buffer_to_thread_local(buffer, t->trace_data(), native);
}
 571 
// Operation types composed for the write paths below:
// concurrent writes for live buffers, mutexed writes for retired ones, and a
// thread-local release folded in after the write.
typedef UnBufferedWriteToChunk<JfrBuffer> WriteOperation;
typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;
typedef ThreadLocalReleaseOp<JfrStorageMspace> ThreadLocalRelease;
typedef CompositeOperation<ConcurrentWriteOperation, ThreadLocalRelease> ThreadLocalWriteOperation;
 577 
// Writes outstanding data to the current chunk: first the registered full
// buffers, then thread local buffers (releasing dead ones as a side effect),
// then the global free list. Returns the total number of bytes processed.
size_t JfrStorage::write() {
  const size_t full_size_processed = write_full();
  WriteOperation wo(_chunkwriter);
  ConcurrentWriteOperation cwo(wo);
  ThreadLocalRelease tlr(_thread_local_mspace);
  ThreadLocalWriteOperation tlwo(&cwo, &tlr);
  process_full_list(tlwo, _thread_local_mspace);
  process_free_list(cwo, _global_mspace);
  return full_size_processed + wo.processed();
}
 588 
// Writes all outstanding data while the VM is at a safepoint; mutexed write
// mode suffices since no mutators are running.
size_t JfrStorage::write_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // mutexed write mode
  process_full_list(writer, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(writer, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(writer, _global_mspace);
  return wo.processed();
}
 600 
// Operation types for the clear paths: discarding instead of writing, with
// the same thread-local release / buffer release composition as the writers.
typedef DiscardOp<DefaultDiscarder<JfrStorage::Buffer> > DiscardOperation;
typedef CompositeOperation<DiscardOperation, ThreadLocalRelease> ThreadLocalDiscardOperation;
typedef ReleaseOp<JfrStorageMspace> ReleaseOperation;
typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> FullOperation;
 605 
// Discards all outstanding data without writing it: first the registered full
// buffers, then thread local buffers (releasing dead ones), then the global
// free list. Returns the total number of bytes discarded.
size_t JfrStorage::clear() {
  const size_t full_size_processed = clear_full();
  DiscardOperation discarder(concurrent); // concurrent discard mode
  ThreadLocalRelease tlr(_thread_local_mspace);
  ThreadLocalDiscardOperation tldo(&discarder, &tlr);
  process_full_list(tldo, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(discarder, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(discarder, _global_mspace);
  return full_size_processed + discarder.processed();
}
 618 
 619 static void insert_free_age_nodes(JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, JfrAgeNode* tail, size_t count) {
 620   if (tail != NULL) {
 621     assert(tail->next() == NULL, "invariant");
 622     assert(head != NULL, "invariant");
 623     assert(head->prev() == NULL, "invariant");
 624     MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
 625     age_mspace->insert_free_tail(head, tail, count);
 626   }
 627 }
 628 
// Walks the detached full list, applying the processor to each retired buffer.
// Transient age nodes are unlinked in place and deallocated as we go; the
// surviving (non-transient) nodes are returned to the free list at the end.
template <typename Processor>
static void process_age_list(Processor& processor, JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, size_t count) {
  assert(age_mspace != NULL, "invariant");
  assert(head != NULL, "invariant");
  JfrAgeNode* node = head;
  JfrAgeNode* last = NULL; // trails node; after the loop it is the list tail
  while (node != NULL) {
    last = node;
    BufferPtr const buffer = node->retired_buffer();
    assert(buffer != NULL, "invariant");
    assert(buffer->retired(), "invariant");
    processor.process(buffer);
    // at this point, buffer is already live or destroyed
    node->clear_identity();
    JfrAgeNode* const next = (JfrAgeNode*)node->next();
    if (node->transient()) {
      // detach this node from the list before deallocating it
      last = (JfrAgeNode*)last->prev();
      if (last != NULL) {
        last->set_next(next);
      } else {
        head = next;
      }
      if (next != NULL) {
        next->set_prev(last);
      }
      --count;
      age_mspace->deallocate(node);
    }
    node = next;
  }
  insert_free_age_nodes(age_mspace, head, last, count);
}
 662 
 663 template <typename Processor>
 664 static size_t process_full(Processor& processor, JfrStorageControl& control, JfrStorageAgeMspace* age_mspace) {
 665   assert(age_mspace != NULL, "invariant");
 666   if (age_mspace->is_full_empty()) {
 667     // nothing to do
 668     return 0;
 669   }
 670   size_t count;
 671   JfrAgeNode* head;;
 672   {
 673     // fetch age list
 674     MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
 675     count = age_mspace->full_count();
 676     head = age_mspace->clear_full();
 677     control.reset_full();
 678   }
 679   assert(head != NULL, "invariant");
 680   process_age_list(processor, age_mspace, head, count);
 681   return count;
 682 }
 683 
 684 static void log(size_t count, size_t amount, bool clear = false) {
 685   if (log_is_enabled(Debug, jfr, system)) {
 686     if (count > 0) {
 687       log_debug(jfr, system)( "%s " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" B of data%s",
 688         clear ? "Discarded" : "Wrote", count, amount, clear ? "." : " to chunk.");
 689     }
 690   }
 691 }
 692 
// full writer
// Assumption is retired only; exclusive access
// MutexedWriter -> ReleaseOp
//
// Writes all registered full buffers to the chunk and releases transient ones
// back to their mspace. Returns the number of bytes written.
size_t JfrStorage::write_full() {
  assert(_chunkwriter.is_valid(), "invariant");
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // a retired buffer implies mutexed access
  ReleaseOperation ro(_transient_mspace);
  FullOperation cmd(&writer, &ro);
  // count == number of buffers; writer.processed() == number of bytes
  const size_t count = process_full(cmd, control(), _age_mspace);
  log(count, writer.processed());
  return writer.processed();
}
 707 
 708 size_t JfrStorage::clear_full() {
 709   DiscardOperation discarder(mutexed); // a retired buffer implies mutexed access
 710   const size_t count = process_full(discarder, control(), _age_mspace);
 711   log(count, discarder.processed(), true);
 712   return discarder.processed();
 713 }
 714 
 715 static void scavenge_log(size_t count, size_t amount, size_t current) {
 716   if (count > 0) {
 717     if (log_is_enabled(Debug, jfr, system)) {
 718       log_debug(jfr, system)("Released " SIZE_FORMAT " dead buffer(s) of " SIZE_FORMAT" B of data.", count, amount);
 719       log_debug(jfr, system)("Current number of dead buffers " SIZE_FORMAT "", current);
 720     }
 721   }
 722 }
 723 
// Visitor that reclaims retired ("dead") buffers back to their mspace,
// tracking how many buffers and bytes were released and keeping the
// dead-count in the control object in sync.
template <typename Mspace>
class Scavenger {
 private:
  JfrStorageControl& _control;
  Mspace* _mspace;
  size_t _count;   // number of buffers reclaimed
  size_t _amount;  // total size of reclaimed buffers
 public:
  typedef typename Mspace::Type Type;
  Scavenger(JfrStorageControl& control, Mspace* mspace) : _control(control), _mspace(mspace), _count(0), _amount(0) {}
  // Reclaims t if it is retired; always returns true to continue iteration.
  bool process(Type* t) {
    if (t->retired()) {
      assert(!t->transient(), "invariant");
      assert(!t->lease(), "invariant");
      assert(t->identity() == NULL, "invariant");
      assert(t->empty(), "invariant");
      ++_count;
      _amount += t->total_size();
      _control.decrement_dead();
      t->clear_retired();
      mspace_release_critical(t, _mspace);
    }
    return true;
  }
  size_t processed() const { return _count; }
  size_t amount() const { return _amount; }
};
 751 
 752 size_t JfrStorage::scavenge() {
 753   if (control().dead_count() > 0) {
 754     Scavenger<JfrStorageMspace> scavenger(control(), _thread_local_mspace);
 755     process_full_list(scavenger, _thread_local_mspace);
 756     scavenge_log(scavenger.processed(), scavenger.amount(), control().dead_count());
 757     return scavenger.processed();
 758   }
 759   return 0;
 760 }