/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/memTrackWorker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/vmError.hpp"


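// Release all memory recorders attached to this generation back to MemTracker
// and clear the recorded class count.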
void GenerationData::reset() {
  _number_of_classes = 0;
  while (_recorder_list != NULL) {
    MemRecorder* tmp = _recorder_list;
    _recorder_list = _recorder_list->next();
    MemTracker::release_thread_recorder(tmp);
  }
}

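// The constructor creates the underlying OS thread; it does not begin running
// until start() is called.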
MemTrackWorker::MemTrackWorker() {
  // os::create_thread() uses the cgc thread type for now. We should revisit
  // this choice, or create a new thread type.
  _has_error = !os::create_thread(this, os::cgc_thread);
  set_name("MemTrackWorker", 0);

  // initialize the circular buffer of generations
  if (!has_error()) {
    _head = _tail = 0;
    for (int index = 0; index < MAX_GENERATIONS; index ++) {
      ::new ((void*)&_gen[index]) GenerationData();
    }
  }
  NOT_PRODUCT(_sync_point_count = 0;)
  NOT_PRODUCT(_merge_count = 0;)
  NOT_PRODUCT(_last_gen_in_use = 0;)
}

MemTrackWorker::~MemTrackWorker() {
  for (int index = 0; index < MAX_GENERATIONS; index ++) {
    _gen[index].reset();
  }
}

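// The default operator new is disallowed; callers must use the nothrow variant
// below, which allocates with the mtNMT memory type and lets the caller handle
// an allocation failure.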
void* MemTrackWorker::operator new(size_t size) {
  assert(false, "use nothrow version");
  return NULL;
}

void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
  return allocate(size, false, mtNMT);
}

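// Illustrative sketch only (the real call site lives outside this file): a
// caller is expected to allocate the worker with the nothrow form and check
// both the allocation and has_error() before starting the thread, e.g.
//
//   MemTrackWorker* worker = new (std::nothrow) MemTrackWorker();
//   if (worker == NULL || worker->has_error()) {
//     // creation failed; native memory tracking cannot proceed
//   } else {
//     worker->start();
//   }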
void MemTrackWorker::start() {
  os::start_thread(this);
}

/*
 * Native memory tracking worker thread loop:
 *   1. merge one generation of memory recorders into the staging area
 *   2. promote the staging data to the memory snapshot
 *
 * This thread can run through safepoints.
 */

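// The generations form a circular buffer of MAX_GENERATIONS slots: _head
// points at the oldest generation whose recorders still need to be merged,
// while _tail points at the generation currently being filled by
// at_sync_point(). The worker drains recorders from _head as MemTracker
// appends new ones at _tail.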
void MemTrackWorker::run() {
  assert(MemTracker::is_on(), "native memory tracking is off");
  this->initialize_thread_local_storage();
  this->record_stack_base_and_size();
  MemSnapshot* snapshot = MemTracker::get_snapshot();
  assert(snapshot != NULL, "Worker should not be started");
  MemRecorder* rec;
  unsigned long processing_generation = 0;
  bool          worker_idle = false;

  while (!MemTracker::shutdown_in_progress()) {
    NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
    {
      // take a recorder from the earliest generation in the buffer
      ThreadCritical tc;
      rec = _gen[_head].next_recorder();
    }
    if (rec != NULL) {
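      // remember which generation we are processing so MemTracker can report
      // the generation currently being merged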
      if (rec->get_generation() != processing_generation || worker_idle) {
        processing_generation = rec->get_generation();
        worker_idle = false;
        MemTracker::set_current_processing_generation(processing_generation);
      }

      // merge the recorder into the staging area
      if (!snapshot->merge(rec)) {
        MemTracker::shutdown(MemTracker::NMT_out_of_memory);
      } else {
        NOT_PRODUCT(_merge_count ++;)
      }
      MemTracker::release_thread_recorder(rec);
    } else {
      // no more recorders to merge, promote the staging area
      // to the snapshot
      if (_head != _tail) {
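        // _head != _tail means at least one earlier generation has been closed
        // by at_sync_point() and is ready to be promoted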
        long number_of_classes;
        {
          ThreadCritical tc;
          if (_gen[_head].has_more_recorder() || _head == _tail) {
            continue;
          }
          number_of_classes = _gen[_head].number_of_classes();
          _gen[_head].reset();

          // done with this generation, advance the _head pointer
          _head = (_head + 1) % MAX_GENERATIONS;
        }
        // promote this generation's data to the snapshot
        if (!snapshot->promote(number_of_classes)) {
          // failed to promote, which means we are out of memory
          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
        }
      } else {
        // worker thread is idle
        worker_idle = true;
        MemTracker::report_worker_idle();
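        // wait briefly for new recorders to arrive, then re-check the
        // pending queue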
        snapshot->wait(1000);
        ThreadCritical tc;
        // check whether more data has arrived
        if (!_gen[_head].has_more_recorder()) {
          _gen[_head].add_recorders(MemTracker::get_pending_recorders());
        }
      }
    }
  }
  assert(MemTracker::shutdown_in_progress(), "just check");

  // transition to final shutdown
  MemTracker::final_shutdown();
}

// At a synchronization point, 'safepoint visible' Java threads are blocked at
// a safepoint, and the remaining threads are blocked on the ThreadCritical
// lock. The caller, MemTracker::sync(), already holds ThreadCritical before
// calling this method.
//
// The following tasks are performed:
//   1. add all recorders in the pending queue to the current generation
//   2. advance to the next generation

void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
  NOT_PRODUCT(_sync_point_count ++;)
  assert(count_recorder(rec) <= MemRecorder::_instance_count,
    "pending queue has infinite loop");

  bool out_of_generation_buffer = false;
  // check shutdown state inside ThreadCritical
  if (MemTracker::shutdown_in_progress()) return;

  _gen[_tail].set_number_of_classes(number_of_classes);
  // append the recorders to the end of the generation
  _gen[_tail].add_recorders(rec);
  assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
    "after add to current generation has infinite loop");
  // We have collected all recorders for this generation. If it holds data, or
  // the buffer is otherwise empty (_head == _tail), advance _tail to start a
  // new generation.
  if (_gen[_tail].has_more_recorder() || _head == _tail) {
    _tail = (_tail + 1) % MAX_GENERATIONS;
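    // if _tail wraps around and catches up with _head, every generation slot
    // is in use and the worker thread has fallen too far behind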
    out_of_generation_buffer = (_tail == _head);
  }

  if (out_of_generation_buffer) {
    MemTracker::shutdown(MemTracker::NMT_out_of_generation);
  }
}

#ifndef PRODUCT
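// debug-only helper: walk a recorder list and return its length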
int MemTrackWorker::count_recorder(const MemRecorder* head) {
  int count = 0;
  while (head != NULL) {
    count ++;
    head = head->next();
  }
  return count;
}

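// debug-only helper: count all recorders queued across all generations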
int MemTrackWorker::count_pending_recorders() const {
  int count = 0;
  for (int index = 0; index < MAX_GENERATIONS; index ++) {
    MemRecorder* head = _gen[index].peek();
    if (head != NULL) {
      count += count_recorder(head);
    }
  }
  return count;
}
#endif