/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/memTrackWorker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/vmError.hpp"


void GenerationData::reset() {
  _number_of_classes = 0;
  while (_recorder_list != NULL) {
    MemRecorder* tmp = _recorder_list;
    _recorder_list = _recorder_list->next();
    MemTracker::release_thread_recorder(tmp);
  }
}

MemTrackWorker::MemTrackWorker() {
  // create_thread() uses the cgc thread type for now. We should revisit
  // this choice, or create a new thread type.
  _has_error = !os::create_thread(this, os::cgc_thread);
  set_name("MemTrackWorker", 0);

  // initialize the generation circular buffer
  if (!has_error()) {
    _head = _tail = 0;
    for(int index = 0; index < MAX_GENERATIONS; index ++) {
      ::new ((void*)&_gen[index]) GenerationData();
    }
  }
  NOT_PRODUCT(_sync_point_count = 0;)
  NOT_PRODUCT(_merge_count = 0;)
  NOT_PRODUCT(_last_gen_in_use = 0;)
}

MemTrackWorker::~MemTrackWorker() {
  for (int index = 0; index < MAX_GENERATIONS; index ++) {
    _gen[index].reset();
  }
}

void* MemTrackWorker::operator new(size_t size) {
  assert(false, "use nothrow version");
  return NULL;
}

void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
  return allocate(size, false, mtNMT);
}
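
// Usage sketch (illustrative only, not a prescribed call site): callers are
// expected to allocate the worker with the nothrow form of operator new above
// and then check has_error() for thread-creation failure, along these lines:
//
//   MemTrackWorker* worker = new (std::nothrow) MemTrackWorker();
//   if (worker == NULL || worker->has_error()) {
//     // out of memory, or the worker thread could not be created
//   }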

void MemTrackWorker::start() {
  os::start_thread(this);
}

/*
 * Native memory tracking worker thread loop:
 *   1. merge one generation of memory recorders into the staging area
 *   2. promote the staging data to the memory snapshot
 *
 * This thread can run through safepoints.
 */
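
// Orientation note (derived from the code below, not normative): _gen[] is
// used as a circular buffer of generations. MemTracker::sync() fills
// _gen[_tail] and advances _tail (see at_sync_point() below), while this loop
// drains _gen[_head], promotes the staged data to the snapshot, and then
// advances _head. A minimal sketch of one iteration, assuming recorders are
// pending:
//
//   rec = _gen[_head].next_recorder();          // under ThreadCritical
//   if (rec != NULL) {
//     snapshot->merge(rec);                     // stage this recorder's data
//   } else if (_head != _tail) {                // generation fully merged
//     snapshot->promote(number_of_classes);     // publish the staged data
//     _head = (_head + 1) % MAX_GENERATIONS;    // move on to the next one
//   }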

void MemTrackWorker::run() {
  assert(MemTracker::is_on(), "native memory tracking is off");
  this->initialize_thread_local_storage();
  this->record_stack_base_and_size();
  MemSnapshot* snapshot = MemTracker::get_snapshot();
  assert(snapshot != NULL, "Worker should not be started");
  MemRecorder* rec;

  while (!MemTracker::shutdown_in_progress()) {
    NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
    {
      // take a recorder from the earliest generation in the buffer
      ThreadCritical tc;
      rec = _gen[_head].next_recorder();
    }
    if (rec != NULL) {
      // merge the recorder into the staging area
      if (!snapshot->merge(rec)) {
        MemTracker::shutdown(MemTracker::NMT_out_of_memory);
      } else {
        NOT_PRODUCT(_merge_count ++;)
      }
      MemTracker::release_thread_recorder(rec);
    } else {
      // no more recorders to merge; promote the staging area
      // to the snapshot
      if (_head != _tail) {
        long number_of_classes;
        {
          ThreadCritical tc;
          if (_gen[_head].has_more_recorder() || _head == _tail) {
            continue;
          }
          number_of_classes = _gen[_head].number_of_classes();
          _gen[_head].reset();

          // done with this generation, increment the _head pointer
          _head = (_head + 1) % MAX_GENERATIONS;
        }
        // promote this generation's data to the snapshot
        if (!snapshot->promote(number_of_classes)) {
          // failed to promote, which means we are out of memory
          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
        }
      } else {
        snapshot->wait(1000);
        ThreadCritical tc;
        // check if more data has arrived
        if (!_gen[_head].has_more_recorder()) {
          _gen[_head].add_recorders(MemTracker::get_pending_recorders());
        }
      }
    }
  }
  assert(MemTracker::shutdown_in_progress(), "just check");

  // transition to final shutdown
  MemTracker::final_shutdown();
}

// At a synchronization point, 'safepoint visible' Java threads are blocked
// at a safepoint, and the rest of the threads are blocked on the ThreadCritical
// lock. The caller, MemTracker::sync(), already holds ThreadCritical before
// calling this method.
//
// The following tasks are performed:
//   1. add all recorders in the pending queue to the current generation
//   2. advance to the next generation
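//
// Note (derived from the logic below): if advancing _tail makes it wrap around
// and meet _head, every slot of the circular generation buffer still holds
// unmerged data, i.e. sync points have outpaced the worker thread by a full
// lap. Tracking is then shut down with NMT_out_of_generation rather than
// overwriting unprocessed generations.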

void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
  NOT_PRODUCT(_sync_point_count ++;)
  assert(count_recorder(rec) <= MemRecorder::_instance_count,
    "pending queue has infinite loop");

  bool out_of_generation_buffer = false;
  // check shutdown state inside ThreadCritical
  if (MemTracker::shutdown_in_progress()) return;

  _gen[_tail].set_number_of_classes(number_of_classes);
  // append the recorders to the end of the generation
  _gen[_tail].add_recorders(rec);
  assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
    "after add to current generation has infinite loop");
  // we have collected all recorders for this generation. If there is data,
  // we need to increment _tail to start a new generation.
  if (_gen[_tail].has_more_recorder() || _head == _tail) {
    _tail = (_tail + 1) % MAX_GENERATIONS;
    out_of_generation_buffer = (_tail == _head);
  }

  if (out_of_generation_buffer) {
    MemTracker::shutdown(MemTracker::NMT_out_of_generation);
  }
}

#ifndef PRODUCT
int MemTrackWorker::count_recorder(const MemRecorder* head) {
  int count = 0;
  while(head != NULL) {
    count ++;
    head = head->next();
  }
  return count;
}

int MemTrackWorker::count_pending_recorders() const {
  int count = 0;
  for (int index = 0; index < MAX_GENERATIONS; index ++) {
    MemRecorder* head = _gen[index].peek();
    if (head != NULL) {
      count += count_recorder(head);
    }
  }
  return count;
}
#endif