/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/thread.hpp"


// Lifecycle management for TSM ParkEvents.
// ParkEvents are type-stable (TSM).
// In our particular implementation they happen to be immortal.
//
// We manage concurrency on the FreeList with a spin lock (ListLock);
// see 8028280 below.  The original implementation used a CAS-based
// detach-modify-reattach idiom (push-one and pop-all) that avoids the
// ABA problems a simple CAS-based push-pop free list would have.
//
// Caveat: Allocate() and Release() may be called from threads
// other than the thread associated with the Event!
// If we need to call Allocate() when running as the thread in
// question then look for the PD calls to initialize native TLS.
// Native TLS (Win32/Linux/Solaris) can only be initialized or
// accessed by the associated thread.
// See also pd_initialize().
//
// Note that we could defer associating a ParkEvent with a thread
// until the 1st time the thread calls park().  unpark() calls to
// an unprovisioned thread would be ignored.  The first park() call
// for a thread would allocate and associate a ParkEvent and return
// immediately.
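//
// For reference, a rough sketch of the detach-modify-reattach idiom
// mentioned above (illustration only -- the functions below serialize on
// ListLock instead; Atomic::cmpxchg_ptr/xchg_ptr are assumed to be the
// JDK8-era HotSpot atomic interface):
//
//   // Release side: push-one.  Only the list head is ever CAS'd.
//   for (;;) {
//     ParkEvent * head = FreeList ;
//     ev->FreeNext = head ;
//     if (Atomic::cmpxchg_ptr (ev, &FreeList, head) == head) break ;
//   }
//
//   // Allocate side: pop-all.  Detach the entire list with one swap, keep
//   // the first element, and reattach the remainder with a CAS loop.
//   // Since no thread ever CASes against an interior node, classic ABA
//   // cannot arise even though nodes are recycled.
//   ParkEvent * list = (ParkEvent *) Atomic::xchg_ptr (NULL, &FreeList) ;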

volatile int ParkEvent::ListLock = 0 ;
ParkEvent * volatile ParkEvent::FreeList = NULL ;

ParkEvent * ParkEvent::Allocate (Thread * t) {
  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
  ParkEvent * ev ;

  // Start by trying to recycle an existing but unassociated
  // ParkEvent from the global free list.
  // Using a spin lock since we are part of the mutex impl.
  // 8028280: using concurrent free list without memory management can leak
  // pretty badly it turns out.
  Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
  {
    ev = FreeList;
    if (ev != NULL) {
      FreeList = ev->FreeNext;
    }
  }
  Thread::SpinRelease(&ListLock);

  if (ev != NULL) {
    guarantee (ev->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new ParkEvent.
    ev = new ParkEvent () ;
    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
  }
  ev->reset() ;                     // courtesy to caller
  ev->AssociatedWith = t ;          // Associate ev with t
  ev->FreeNext       = NULL ;
  return ev ;
}

void ParkEvent::Release (ParkEvent * ev) {
  if (ev == NULL) return ;
  guarantee (ev->FreeNext == NULL      , "invariant") ;
  ev->AssociatedWith = NULL ;
  // Note that if we didn't have the TSM/immortal constraint, then
  // when reattaching we could trim the list.
  Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
  {
    ev->FreeNext = FreeList;
    FreeList = ev;
  }
  Thread::SpinRelease(&ListLock);
}

// Override operator new and delete so we can ensure that the
// least significant byte of ParkEvent addresses is 0.
// Beware that excessive address alignment is undesirable
// as it can result in D$ index usage imbalance as
// well as bank access imbalance on Niagara-like platforms,
// although Niagara's hash function should help.

void * ParkEvent::operator new (size_t sz) throw() {
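  // Over-allocate by 256 bytes and round up to the next 256-byte boundary,
  // so the low 8 bits of the returned address are zero.  The unused slop in
  // front of the aligned address is never reclaimed; that is acceptable only
  // because ParkEvents are immortal and the raw block is never freed.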
  return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
}

void ParkEvent::operator delete (void * a) {
  // ParkEvents are type-stable and immortal ...
  ShouldNotReachHere();
}
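
// For orientation, a sketch of how these are typically paired up by the
// thread lifecycle code (field names as in the JDK8-era thread.cpp; shown
// here only as an illustration, not as part of this file):
//
//   // In Thread::Thread():
//   _ParkEvent  = ParkEvent::Allocate (this) ;
//   _SleepEvent = ParkEvent::Allocate (this) ;
//
//   // In Thread::~Thread():
//   ParkEvent::Release (_ParkEvent)  ; _ParkEvent  = NULL ;
//   ParkEvent::Release (_SleepEvent) ; _SleepEvent = NULL ;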


// 6399321 As a temporary measure we copied & modified the ParkEvent::
// allocate() and release() code for use by Parkers.  The Parker:: forms
// will eventually be removed as we consolidate and shift over to ParkEvents
// for both builtin synchronization and JSR166 operations.
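//
// For orientation, a rough sketch of the expected wiring on the JSR166 side
// (names as in the JDK8-era sources; illustration only, not part of this
// file):
//
//   // JavaThread construction / destruction:
//   _parker = Parker::Allocate (this) ;
//   Parker::Release (_parker) ; _parker = NULL ;
//
//   // Unsafe.park / Unsafe.unpark dispatch to the per-thread Parker:
//   thread->parker()->park (isAbsolute != 0, time) ;
//   p->unpark() ;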

volatile int Parker::ListLock = 0 ;
Parker * volatile Parker::FreeList = NULL ;

Parker * Parker::Allocate (JavaThread * t) {
  guarantee (t != NULL, "invariant") ;
  Parker * p ;

  // Start by trying to recycle an existing but unassociated
  // Parker from the global free list.
  // 8028280: using concurrent free list without memory management can leak
  // pretty badly it turns out.
  Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
  {
    p = FreeList;
    if (p != NULL) {
      FreeList = p->FreeNext;
    }
  }
  Thread::SpinRelease(&ListLock);

  if (p != NULL) {
    guarantee (p->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new Parker.
    p = new Parker() ;
  }
  p->AssociatedWith = t ;          // Associate p with t
  p->FreeNext       = NULL ;
  p->_counter       = 0;
  return p ;
}


void Parker::Release (Parker * p) {
  if (p == NULL) return ;
  guarantee (p->AssociatedWith != NULL, "invariant") ;
  guarantee (p->FreeNext == NULL      , "invariant") ;

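  // Clear the association while holding the Parker's own mutex so the store
  // serializes with any unpark() that may be concurrently operating on p
  // (unpark() takes the same mutex); presumably this is why Release() does
  // not simply clear the field with a bare store.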
  int status = pthread_mutex_lock(p->_mutex);
  assert(status == 0, "invariant");
  {
    p->AssociatedWith = NULL;
  }
  status = pthread_mutex_unlock(p->_mutex);
  assert(status == 0, "invariant");

  Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
  {
    p->FreeNext = FreeList;
    FreeList = p;
  }
  Thread::SpinRelease(&ListLock);
}