1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_stubs.cpp.incl"
  27 
  28 
  29 // Implementation of StubQueue
  30 //
  31 // Standard wrap-around queue implementation; the queue dimensions
  32 // are specified by the _queue_begin & _queue_end indices. The queue
  33 // can be in two states (transparent to the outside):
  34 //
  35 // a) contiguous state: all queue entries in one block (or empty)
  36 //
  37 // Queue: |...|XXXXXXX|...............|
  38 //        ^0  ^begin  ^end            ^size = limit
  39 //            |_______|
  40 //            one block
  41 //
  42 // b) non-contiguous state: queue entries in two blocks
  43 //
  44 // Queue: |XXX|.......|XXXXXXX|.......|
  45 //        ^0  ^end    ^begin  ^limit  ^size
  46 //        |___|       |_______|
  47 //         1st block  2nd block
  48 //
  49 // In the non-contiguous state, the wrap-around point is
  50 // indicated via the _buffer_limit index since the last
  51 // queue entry may not fill up the queue completely in
  52 // which case we need to know where the 2nd block's end
  53 // is to do the proper wrap-around. When removing the
  54 // last entry of the 2nd block, _buffer_limit is reset
  55 // to _buffer_size.
  56 //
  57 // CAUTION: DO NOT MESS WITH THIS CODE IF YOU CANNOT PROVE
  58 // ITS CORRECTNESS! THIS CODE IS MORE SUBTLE THAN IT LOOKS!
  59 
  60 
  61 StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
  62                      Mutex* lock, const char* name) : _mutex(lock) {
  63   intptr_t size = round_to(buffer_size, 2*BytesPerWord);
  64   BufferBlob* blob = BufferBlob::create(name, size);
  65   if( blob == NULL) {
  66     vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name));
  67   }
  68   _stub_interface  = stub_interface;
  69   _buffer_size     = blob->content_size();
  70   _buffer_limit    = blob->content_size();
  71   _stub_buffer     = blob->content_begin();
  72   _queue_begin     = 0;
  73   _queue_end       = 0;
  74   _number_of_stubs = 0;
  75   register_queue(this);
  76 }
  77 
  78 
  79 StubQueue::~StubQueue() {
  80   // Note: Currently StubQueues are never destroyed so nothing needs to be done here.
  81   //       If we want to implement the destructor, we need to release the BufferBlob
  82   //       allocated in the constructor (i.e., we need to keep it around or look it
  83   //       up via CodeCache::find_blob(...).
  84   Unimplemented();
  85 }
  86 
  87 
  88 Stub* StubQueue::stub_containing(address pc) const {
  89   if (contains(pc)) {
  90     for (Stub* s = first(); s != NULL; s = next(s)) {
  91       if (stub_contains(s, pc)) return s;
  92     }
  93   }
  94   return NULL;
  95 }
  96 
  97 
  98 Stub* StubQueue::request_committed(int code_size) {
  99   Stub* s = request(code_size);
 100   if (s != NULL) commit(code_size);
 101   return s;
 102 }
 103 
 104 
// Reserve space at the queue end for a stub whose code part is
// requested_code_size bytes. On success the returned stub has been
// initialized to the (alignment-rounded) requested size and the queue
// lock REMAINS HELD; the caller must generate the code and then call
// commit(), which releases the lock. On failure, NULL is returned and
// the lock has already been released here.
Stub* StubQueue::request(int requested_code_size) {
  assert(requested_code_size > 0, "requested_code_size must be > 0");
  if (_mutex != NULL) _mutex->lock();
  // Candidate slot at the current queue end (may be recomputed below if
  // we wrap around, since wrapping moves _queue_end).
  Stub* s = current_stub();
  // Total size = code size plus stub header/overhead, aligned for code entry.
  int requested_size = round_to(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
  if (requested_size <= available_space()) {
    if (is_contiguous()) {
      // Queue: |...|XXXXXXX|.............|
      //        ^0  ^begin  ^end          ^size = limit
      assert(_buffer_limit == _buffer_size, "buffer must be fully usable");
      if (_queue_end + requested_size <= _buffer_size) {
        // code fits in at the end => nothing to do
        stub_initialize(s, requested_size);
        return s;
      } else {
        // stub doesn't fit in at the queue end
        // => reduce buffer limit & wrap around
        assert(!is_empty(), "just checkin'");
        _buffer_limit = _queue_end;
        _queue_end = 0;
      }
    }
  }
  // Re-test: either we were non-contiguous all along, or we just wrapped
  // and the space before _queue_begin may now be large enough.
  if (requested_size <= available_space()) {
    assert(!is_contiguous(), "just checkin'");
    assert(_buffer_limit <= _buffer_size, "queue invariant broken");
    // Queue: |XXX|.......|XXXXXXX|.......|
    //        ^0  ^end    ^begin  ^limit  ^size
    // Recompute the slot: _queue_end may have moved to 0 above.
    s = current_stub();
    stub_initialize(s, requested_size);
    return s;
  }
  // Not enough space left
  if (_mutex != NULL) _mutex->unlock();
  return NULL;
}
 141 
 142 
// Finalize the stub previously reserved via request(): shrink it to the
// actually used (alignment-rounded) size, advance the queue end past it,
// and release the queue lock that request() left held. committed_code_size
// must not exceed the code size passed to request().
void StubQueue::commit(int committed_code_size) {
  assert(committed_code_size > 0, "committed_code_size must be > 0");
  int committed_size = round_to(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
  Stub* s = current_stub();
  assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
  // Re-initialize in place so the stub records its final (possibly
  // smaller) size; any surplus from request() is returned to the queue.
  stub_initialize(s, committed_size);
  _queue_end += committed_size;
  _number_of_stubs++;
  if (_mutex != NULL) _mutex->unlock();
  debug_only(stub_verify(s);)
}
 154 
 155 
// Remove the oldest stub (at _queue_begin) from the queue, maintaining
// the wrap-around invariants described in the file header comment.
// No-op on an empty queue.
void StubQueue::remove_first() {
  if (number_of_stubs() == 0) return;
  Stub* s = first();
  debug_only(stub_verify(s);)
  // Give the stub a chance to release any resources before its space
  // is reclaimed.
  stub_finalize(s);
  _queue_begin += stub_size(s);
  assert(_queue_begin <= _buffer_limit, "sanity check");
  if (_queue_begin == _queue_end) {
    // buffer empty
    // => reset queue indices
    _queue_begin  = 0;
    _queue_end    = 0;
    _buffer_limit = _buffer_size;
  } else if (_queue_begin == _buffer_limit) {
    // buffer limit reached
    // => reset buffer limit & wrap around
    // (we just consumed the last stub of the 2nd block; the queue is
    // contiguous again, so the full buffer becomes usable)
    _buffer_limit = _buffer_size;
    _queue_begin = 0;
  }
  _number_of_stubs--;
}
 177 
 178 
 179 void StubQueue::remove_first(int n) {
 180   int i = MIN2(n, number_of_stubs());
 181   while (i-- > 0) remove_first();
 182 }
 183 
 184 
 185 void StubQueue::remove_all(){
 186   debug_only(verify();)
 187   remove_first(number_of_stubs());
 188   assert(number_of_stubs() == 0, "sanity check");
 189 }
 190 
 191 
// Global registry of all StubQueues ever created, used by queues_do().
// A fixed-size table suffices since queues are never destroyed and only
// a handful exist per VM.
enum { StubQueueLimit = 10 };  // there are only a few in the world
static StubQueue* registered_stub_queues[StubQueueLimit];
 194 
 195 void StubQueue::register_queue(StubQueue* sq) {
 196   for (int i = 0; i < StubQueueLimit; i++) {
 197     if (registered_stub_queues[i] == NULL) {
 198       registered_stub_queues[i] = sq;
 199       return;
 200     }
 201   }
 202   ShouldNotReachHere();
 203 }
 204 
 205 
 206 void StubQueue::queues_do(void f(StubQueue* sq)) {
 207   for (int i = 0; i < StubQueueLimit; i++) {
 208     if (registered_stub_queues[i] != NULL) {
 209       f(registered_stub_queues[i]);
 210     }
 211   }
 212 }
 213 
 214 
 215 void StubQueue::stubs_do(void f(Stub* s)) {
 216   debug_only(verify();)
 217   MutexLockerEx lock(_mutex);
 218   for (Stub* s = first(); s != NULL; s = next(s)) f(s);
 219 }
 220 
 221 
 222 void StubQueue::verify() {
 223   // verify only if initialized
 224   if (_stub_buffer == NULL) return;
 225   MutexLockerEx lock(_mutex);
 226   // verify index boundaries
 227   guarantee(0 <= _buffer_size, "buffer size must be positive");
 228   guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size , "_buffer_limit out of bounds");
 229   guarantee(0 <= _queue_begin  && _queue_begin  <  _buffer_limit, "_queue_begin out of bounds");
 230   guarantee(0 <= _queue_end    && _queue_end    <= _buffer_limit, "_queue_end   out of bounds");
 231   // verify alignment
 232   guarantee(_buffer_size  % CodeEntryAlignment == 0, "_buffer_size  not aligned");
 233   guarantee(_buffer_limit % CodeEntryAlignment == 0, "_buffer_limit not aligned");
 234   guarantee(_queue_begin  % CodeEntryAlignment == 0, "_queue_begin  not aligned");
 235   guarantee(_queue_end    % CodeEntryAlignment == 0, "_queue_end    not aligned");
 236   // verify buffer limit/size relationship
 237   if (is_contiguous()) {
 238     guarantee(_buffer_limit == _buffer_size, "_buffer_limit must equal _buffer_size");
 239   }
 240   // verify contents
 241   int n = 0;
 242   for (Stub* s = first(); s != NULL; s = next(s)) {
 243     stub_verify(s);
 244     n++;
 245   }
 246   guarantee(n == number_of_stubs(), "number of stubs inconsistent");
 247   guarantee(_queue_begin != _queue_end || n == 0, "buffer indices must be the same");
 248 }
 249 
 250 
 251 void StubQueue::print() {
 252   MutexLockerEx lock(_mutex);
 253   for (Stub* s = first(); s != NULL; s = next(s)) {
 254     stub_print(s);
 255   }
 256 }