src/share/vm/runtime/park.cpp

// question then look for the PD calls to initialize native TLS.
// Native TLS (Win32/Linux/Solaris) can only be initialized or
// accessed by the associated thread.
// See also pd_initialize().
//
// Note that we could defer associating a ParkEvent with a thread
// until the 1st time the thread calls park().  unpark() calls to
// an unprovisioned thread would be ignored.  The first park() call
// for a thread would allocate and associate a ParkEvent and return
// immediately.

volatile int ParkEvent::ListLock = 0 ;
ParkEvent * volatile ParkEvent::FreeList = NULL ;

ParkEvent * ParkEvent::Allocate (Thread * t) {
  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
  ParkEvent * ev ;

  // Start by trying to recycle an existing but unassociated
  // ParkEvent from the global free list.
  for (;;) {
    ev = FreeList ;
    if (ev == NULL) break ;
    // 1: Detach - sequester or privatize the list
    // Tantamount to ev = Swap (&FreeList, NULL)
    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
       continue ;
    }

    // We've detached the list.  The list in-hand is now
    // local to this thread.   This thread can operate on the
    // list without risk of interference from other threads.
    // 2: Extract -- pop the 1st element from the list.
    ParkEvent * List = ev->FreeNext ;
    if (List == NULL) break ;
    for (;;) {
        // 3: Try to reattach the residual list
        guarantee (List != NULL, "invariant") ;
        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
        if (Arv == NULL) break ;

        // New nodes arrived.  Try to detach the recent arrivals.
        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
            continue ;
        }
        guarantee (Arv != NULL, "invariant") ;
        // 4: Merge Arv into List
        ParkEvent * Tail = List ;
        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
        Tail->FreeNext = Arv ;
    }
    break ;
  }

  if (ev != NULL) {
    guarantee (ev->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new ParkEvent.
    // In rare cases an allocating thread might detach a long list --
    // installing null into FreeList -- and then stall or be obstructed.
    // A 2nd thread calling Allocate() would see FreeList == null.
    // The list held privately by the 1st thread is unavailable to the 2nd thread.
    // In that case the 2nd thread would have to materialize a new ParkEvent,
    // even though free ParkEvents existed in the system.  In this case we end up
    // with more ParkEvents in circulation than we need, but the race is
    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
    // is equal to the maximum # of threads that existed at any one time.
    // Because of the race mentioned above, segments of the freelist
    // can be transiently inaccessible.  At worst we may end up with the
    // # of ParkEvents in circulation slightly above the ideal.
    // Note that if we didn't have the TSM/immortal constraint, then
    // when reattaching, above, we could trim the list.
    ev = new ParkEvent () ;
    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
  }
  ev->reset() ;                     // courtesy to caller
  ev->AssociatedWith = t ;          // Associate ev with t
  ev->FreeNext       = NULL ;
  return ev ;
}
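
The detach/extract/reattach protocol above is the subtle part of this file. Below is a minimal, self-contained sketch of the same pattern -- not HotSpot code -- using C++11 std::atomic in place of Atomic::cmpxchg_ptr; Node, free_list, and pop_detached are illustrative names, not identifiers from park.cpp.

#include <atomic>

struct Node { Node* next; };
static std::atomic<Node*> free_list(NULL);

static Node* pop_detached() {
  Node* head = free_list.load();
  while (head != NULL) {
    // 1: Detach -- privatize the whole list by swapping NULL into free_list.
    if (!free_list.compare_exchange_weak(head, NULL)) continue;   // head reloads on failure
    // 2: Extract -- keep the first element for ourselves.
    Node* rest = head->next;
    // 3: Try to reattach the residual list.
    while (rest != NULL) {
      Node* expected = NULL;
      if (free_list.compare_exchange_strong(expected, rest)) break;
      // New nodes arrived in the interim; try to detach the arrivals.
      if (!free_list.compare_exchange_strong(expected, NULL)) continue;
      // 4: Merge the arrivals onto the tail of the residual list, then retry.
      Node* tail = rest;
      while (tail->next != NULL) tail = tail->next;
      tail->next = expected;
    }
    break;
  }
  return head;   // NULL if the free list was empty
}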

void ParkEvent::Release (ParkEvent * ev) {
  if (ev == NULL) return ;
  guarantee (ev->FreeNext == NULL      , "invariant") ;
  ev->AssociatedWith = NULL ;
  for (;;) {
    // Push ev onto FreeList
    // The mechanism is "half" lock-free.
    ParkEvent * List = FreeList ;
    ev->FreeNext = List ;
    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
  }
}
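
Release() is the push half of the same scheme: a plain CAS retry loop with no detach step. In the same illustrative std::atomic terms as the sketch above (push is not a park.cpp function):

static void push(std::atomic<Node*>& list, Node* n) {
  Node* head = list.load();
  do {
    n->next = head;                                  // speculatively link ahead of the current head
  } while (!list.compare_exchange_weak(head, n));    // head reloads on failure; retry
}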

// Override operator new and delete so we can ensure that the
// least significant byte of ParkEvent addresses is 0.
// Beware that excessive address alignment is undesirable
// as it can result in D$ index usage imbalance as
// well as bank access imbalance on Niagara-like platforms,
// although Niagara's hash function should help.

void * ParkEvent::operator new (size_t sz) throw() {
  return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
}
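
The expression works by over-allocating 256 bytes of slack and then rounding the raw address up: adding 256 and masking with -256 (that is, ~0xFF) clears the low eight bits, so the result lands on the next 256-byte boundary strictly above the raw pointer and still inside the padded block. A restatement with an illustrative helper name:

#include <stdint.h>

// align_up_256 is an illustrative helper, not part of park.cpp.
static inline void* align_up_256(void* raw) {
  return (void*) ((intptr_t(raw) + 256) & -256);   // -256 == ~(intptr_t)0xFF
}
// e.g. 0x1001 -> 0x1100 and 0x1100 -> 0x1200: the result always exceeds raw,
// which is why the allocation above requests sz + 256 rather than sz + 255.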

void ParkEvent::operator delete (void * a) {
  // ParkEvents are type-stable and immortal ...
  ShouldNotReachHere();
}


// 6399321 As a temporary measure we copied & modified the ParkEvent::
// allocate() and release() code for use by Parkers.  The Parker:: forms
// will eventually be removed as we consolidate and shift over to ParkEvents
// for both builtin synchronization and JSR166 operations.

volatile int Parker::ListLock = 0 ;
Parker * volatile Parker::FreeList = NULL ;

Parker * Parker::Allocate (JavaThread * t) {
  guarantee (t != NULL, "invariant") ;
  Parker * p ;

  // Start by trying to recycle an existing but unassociated
  // Parker from the global free list.
  for (;;) {
    p = FreeList ;
    if (p  == NULL) break ;
    // 1: Detach
    // Tantamount to p = Swap (&FreeList, NULL)
    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
       continue ;
    }

    // We've detached the list.  The list in-hand is now
    // local to this thread.   This thread can operate on the
    // list without risk of interference from other threads.
    // 2: Extract -- pop the 1st element from the list.
    Parker * List = p->FreeNext ;
    if (List == NULL) break ;
    for (;;) {
        // 3: Try to reattach the residual list
        guarantee (List != NULL, "invariant") ;
        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
        if (Arv == NULL) break ;

        // New nodes arrived.  Try to detach the recent arrivals.
        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
            continue ;
        }
        guarantee (Arv != NULL, "invariant") ;
        // 4: Merge Arv into List
        Parker * Tail = List ;
        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
        Tail->FreeNext = Arv ;
    }
    break ;
  }

  if (p != NULL) {
    guarantee (p->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new Parker.
    // In rare cases an allocating thread might detach
    // a long list -- installing null into FreeList -- and
    // then stall.  Another thread calling Allocate() would see
    // FreeList == null and then invoke the ctor.  In this case we
    // end up with more Parkers in circulation than we need, but
    // the race is rare and the outcome is benign.
    // Ideally, the # of extant Parkers is equal to the
    // maximum # of threads that existed at any one time.
    // Because of the race mentioned above, segments of the
    // freelist can be transiently inaccessible.  At worst
    // we may end up with the # of Parkers in circulation
    // slightly above the ideal.
    p = new Parker() ;
  }
  p->AssociatedWith = t ;          // Associate p with t
  p->FreeNext       = NULL ;
  return p ;
}


void Parker::Release (Parker * p) {
  if (p == NULL) return ;
  guarantee (p->AssociatedWith != NULL, "invariant") ;
  guarantee (p->FreeNext == NULL      , "invariant") ;
  p->AssociatedWith = NULL ;
  for (;;) {
    // Push p onto FreeList
    Parker * List = FreeList ;
    p->FreeNext = List ;
    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
  }
}



// question then look for the PD calls to initialize native TLS.
// Native TLS (Win32/Linux/Solaris) can only be initialized or
// accessed by the associated thread.
// See also pd_initialize().
//
// Note that we could defer associating a ParkEvent with a thread
// until the 1st time the thread calls park().  unpark() calls to
// an unprovisioned thread would be ignored.  The first park() call
// for a thread would allocate and associate a ParkEvent and return
// immediately.

volatile int ParkEvent::ListLock = 0 ;
ParkEvent * volatile ParkEvent::FreeList = NULL ;

ParkEvent * ParkEvent::Allocate (Thread * t) {
  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
  ParkEvent * ev ;

  // Start by trying to recycle an existing but unassociated
  // ParkEvent from the global free list.
  // Using a spin lock since we are part of the mutex impl.
  // 8028280: using concurrent free list without memory management can leak
  // pretty badly it turns out.
  Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
  {
    ev = FreeList;
    if (ev != NULL) {
      FreeList = ev->FreeNext;
    }
  }
  Thread::SpinRelease(&ListLock);

  if (ev != NULL) {
    guarantee (ev->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new ParkEvent.
    ev = new ParkEvent () ;
    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
  }
  ev->reset() ;                     // courtesy to caller
  ev->AssociatedWith = t ;          // Associate ev with t
  ev->FreeNext       = NULL ;
  return ev ;
}

void ParkEvent::Release (ParkEvent * ev) {
  if (ev == NULL) return ;
  guarantee (ev->FreeNext == NULL      , "invariant") ;
  ev->AssociatedWith = NULL ;
  // Note that if we didn't have the TSM/immortal constraint, then
  // when reattaching we could trim the list.
  Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
  {
    ev->FreeNext = FreeList;
    FreeList = ev;
  }
  Thread::SpinRelease(&ListLock);
}
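
Compared with the CAS-based version, every list operation above now runs under Thread::SpinAcquire/SpinRelease, so the list is never privatized and no segment can become transiently unreachable. A minimal sketch of the same shape, assuming a bare std::atomic_flag spin lock rather than HotSpot's SpinAcquire; Node, spin_lock, pop_locked, and push_locked are illustrative names:

#include <atomic>

struct Node { Node* next; };
static std::atomic_flag spin_lock = ATOMIC_FLAG_INIT;
static Node* locked_free_list = NULL;   // guarded by spin_lock

static Node* pop_locked() {
  while (spin_lock.test_and_set(std::memory_order_acquire)) { /* spin */ }
  Node* n = locked_free_list;
  if (n != NULL) locked_free_list = n->next;   // unlink the head
  spin_lock.clear(std::memory_order_release);
  return n;                                    // NULL if the list was empty
}

static void push_locked(Node* n) {
  while (spin_lock.test_and_set(std::memory_order_acquire)) { /* spin */ }
  n->next = locked_free_list;                  // link in front of the old head
  locked_free_list = n;
  spin_lock.clear(std::memory_order_release);
}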

// Override operator new and delete so we can ensure that the
// least significant byte of ParkEvent addresses is 0.
// Beware that excessive address alignment is undesirable
// as it can result in D$ index usage imbalance as
// well as bank access imbalance on Niagara-like platforms,
// although Niagara's hash function should help.

void * ParkEvent::operator new (size_t sz) throw() {
  return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
}

void ParkEvent::operator delete (void * a) {
  // ParkEvents are type-stable and immortal ...
  ShouldNotReachHere();
}


// 6399321 As a temporary measure we copied & modified the ParkEvent::
// allocate() and release() code for use by Parkers.  The Parker:: forms
// will eventually be removed as we consolidate and shift over to ParkEvents
// for both builtin synchronization and JSR166 operations.

volatile int Parker::ListLock = 0 ;
Parker * volatile Parker::FreeList = NULL ;

Parker * Parker::Allocate (JavaThread * t) {
  guarantee (t != NULL, "invariant") ;
  Parker * p ;

  // Start by trying to recycle an existing but unassociated
  // Parker from the global free list.
  // 8028280: using concurrent free list without memory management can leak
  // pretty badly it turns out.
  Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
  {
    p = FreeList;
    if (p != NULL) {
      FreeList = p->FreeNext;
    }
  }
  Thread::SpinRelease(&ListLock);

  if (p != NULL) {
    guarantee (p->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new Parker.
    p = new Parker() ;
  }
  p->AssociatedWith = t ;          // Associate p with t
  p->FreeNext       = NULL ;
  return p ;
}


void Parker::Release (Parker * p) {
  if (p == NULL) return ;
  guarantee (p->AssociatedWith != NULL, "invariant") ;
  guarantee (p->FreeNext == NULL      , "invariant") ;
  p->AssociatedWith = NULL ;

  Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
  {
    p->FreeNext = FreeList;
    FreeList = p;
  }
  Thread::SpinRelease(&ListLock);
}