< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.cpp

Print this page




 284       // always expand the heap by an amount aligned to the heap
 285       // region size, the free list should in theory not be empty.
 286       // In either case allocate_free_region() will check for NULL.
 287       res = _hrm.allocate_free_region(is_old);
 288     } else {
 289       _expand_heap_after_alloc_failure = false;
 290     }
 291   }
 292   return res;
 293 }
 294 
 // Initializes the num_regions committed regions [first, first + num_regions)
 // to hold a single humongous object of word_size words and returns the
 // address of the new object (the bottom of the first region).
 //
 // The ordering below is concurrency-critical: the object header is zeroed
 // and the BOT set up BEFORE any region's top is published (see the
 // storestore barrier), so concurrent refinement threads never scan a
 // half-initialized object.
 295 HeapWord*
 296 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 297                                                            uint num_regions,
 298                                                            size_t word_size,
 299                                                            AllocationContext_t context) {
 300   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 301   assert(is_humongous(word_size), "word_size should be humongous");
 302   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 303 
 304   // Index of last region in the series + 1.
 305   uint last = first + num_regions;
 306 
 307   // We need to initialize the region(s) we just discovered. This is
 308   // a bit tricky given that it can happen concurrently with
 309   // refinement threads refining cards on these regions and
 310   // potentially wanting to refine the BOT as they are scanning
 311   // those cards (this can happen shortly after a cleanup; see CR
 312   // 6991377). So we have to set up the region(s) carefully and in
 313   // a specific order.
 314 
 315   // The word size sum of all the regions we will allocate.
 316   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 317   assert(word_size <= word_size_sum, "sanity");
 318 
 319   // This will be the "starts humongous" region.
 320   HeapRegion* first_hr = region_at(first);
 321   // The header of the new object will be placed at the bottom of
 322   // the first region.
 323   HeapWord* new_obj = first_hr->bottom();
 324   // This will be the new top of the new object.
 325   HeapWord* obj_top = new_obj + word_size;
 326 
 327   // First, we need to zero the header of the space that we will be
 328   // allocating. When we update top further down, some refinement
 329   // threads might try to scan the region. By zeroing the header we
 330   // ensure that any thread that will try to scan the region will
 331   // come across the zero klass word and bail out.
 332   //
 333   // NOTE: It would not have been correct to have used
 334   // CollectedHeap::fill_with_object() and make the space look like
 335   // an int array. The thread that is doing the allocation will
 336   // later update the object header to a potentially different array
 337   // type and, for a very short period of time, the klass and length
 338   // fields will be inconsistent. This could cause a refinement
 339   // thread to calculate the object size incorrectly.
 340   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 341 
 342   size_t fill_size = word_size_sum - word_size;
 343   if (fill_size >= min_fill_size()) {
 344     fill_with_objects(obj_top, fill_size);
 345   } else {
 346     fill_size = 0;
 // NOTE(review): when 0 < fill_size < min_fill_size() the tail of the last
 // region cannot hold a filler object, yet top is still set to end() below;
 // heap iteration cannot parse that gap (cf. the revised version of this
 // function in this diff, which tracks it as words_not_fillable).




 347   }
 348 
 349   // We will set up the first region as "starts humongous". This
 350   // will also update the BOT covering all the regions to reflect
 351   // that there is a single object that starts at the bottom of the
 352   // first region.
 353   first_hr->set_starts_humongous(obj_top, fill_size);
 354   first_hr->set_allocation_context(context);
 355   // Then, if there are any, we will set up the "continues
 356   // humongous" regions.
 357   HeapRegion* hr = NULL;
 358   for (uint i = first + 1; i < last; ++i) {
 359     hr = region_at(i);
 360     hr->set_continues_humongous(first_hr);
 361     hr->set_allocation_context(context);
 362   }
 363 
 364   // Up to this point no concurrent thread would have been able to
 365   // do any scanning on any region in this series. All the top
 366   // fields still point to bottom, so the intersection between
 367   // [bottom,top] and [card_start,card_end] will be empty. Before we
 368   // update the top fields, we'll do a storestore to make sure that
 369   // no thread sees the update to top before the zeroing of the
 370   // object header and the BOT initialization.
 371   OrderAccess::storestore();
 372 
 373   // Now that the BOT and the object header have been initialized,
 374   // we can update top of the "starts humongous" region.
 375   first_hr->set_top(first_hr->end());
 376   if (_hr_printer.is_active()) {
 377     _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->end());
 378   }
 379 
 380   // Now, we will update the top fields of the "continues humongous"
 381   // regions.
 382   hr = NULL;
 383   for (uint i = first + 1; i < last; ++i) {
 384     hr = region_at(i);
 385     hr->set_top(hr->end());
 386     if (_hr_printer.is_active()) {
 387       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 388     }
 389   }
 390 
 391   assert(hr == NULL || (hr->bottom() < obj_top && obj_top <= hr->end()),






 392          "obj_top should be in last region");
 393 
 394   check_bitmaps("Humongous Region Allocation", first_hr);
 395 
 396   increase_used(word_size_sum * HeapWordSize);


 397 
 398   for (uint i = first; i < last; ++i) {
 399     _humongous_set.add(region_at(i));








 400   }
 401 
 402   return new_obj;
 403 }
 404 
 405 // If could fit into free regions w/o expansion, try.
 406 // Otherwise, if can expand, do so.
 407 // Otherwise, if using ex regions might help, try with ex given back.
 408 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 409   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 410 
 411   verify_region_sets_optional();
 412 
 413   uint first = G1_NO_HRM_INDEX;
 414   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 415 
 416   if (obj_regions == 1) {
 417     // Only one region to allocate, try to use a fast path by directly allocating
 418     // from the free lists. Do not try to expand here, we will potentially do that
 419     // later.




 284       // always expand the heap by an amount aligned to the heap
 285       // region size, the free list should in theory not be empty.
 286       // In either case allocate_free_region() will check for NULL.
 287       res = _hrm.allocate_free_region(is_old);
 288     } else {
 289       _expand_heap_after_alloc_failure = false;
 290     }
 291   }
 292   return res;
 293 }
 294 
 // Initializes the num_regions committed regions [first, first + num_regions)
 // to hold a single humongous object of word_size words and returns the
 // address of the new object (the bottom of the first region).
 //
 // The ordering below is concurrency-critical: the object header is zeroed
 // and the BOT set up BEFORE any region's top is published (see the
 // storestore barrier), so concurrent refinement threads never scan a
 // half-initialized object. The last region's top excludes any tail too
 // small to hold a filler object (words_not_fillable), so the heap stays
 // iterable and the BOT complete.
 295 HeapWord*
 296 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 297                                                            uint num_regions,
 298                                                            size_t word_size,
 299                                                            AllocationContext_t context) {
 300   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 301   assert(is_humongous(word_size), "word_size should be humongous");
 302   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 303 
 304   // Index of last region in the series.
 305   uint last = first + num_regions - 1;
 306 
 307   // We need to initialize the region(s) we just discovered. This is
 308   // a bit tricky given that it can happen concurrently with
 309   // refinement threads refining cards on these regions and
 310   // potentially wanting to refine the BOT as they are scanning
 311   // those cards (this can happen shortly after a cleanup; see CR
 312   // 6991377). So we have to set up the region(s) carefully and in
 313   // a specific order.
 314 
 315   // The word size sum of all the regions we will allocate.
 316   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 317   assert(word_size <= word_size_sum, "sanity");
 318 
 319   // This will be the "starts humongous" region.
 320   HeapRegion* first_hr = region_at(first);
 321   // The header of the new object will be placed at the bottom of
 322   // the first region.
 323   HeapWord* new_obj = first_hr->bottom();
 324   // This will be the new top of the new object.
 325   HeapWord* obj_top = new_obj + word_size;
 326 
 327   // First, we need to zero the header of the space that we will be
 328   // allocating. When we update top further down, some refinement
 329   // threads might try to scan the region. By zeroing the header we
 330   // ensure that any thread that will try to scan the region will
 331   // come across the zero klass word and bail out.
 332   //
 333   // NOTE: It would not have been correct to have used
 334   // CollectedHeap::fill_with_object() and make the space look like
 335   // an int array. The thread that is doing the allocation will
 336   // later update the object header to a potentially different array
 337   // type and, for a very short period of time, the klass and length
 338   // fields will be inconsistent. This could cause a refinement
 339   // thread to calculate the object size incorrectly.
 340   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 341 
 342   // How many words we use for filler objects.
 343   size_t word_fill_size = word_size_sum - word_size;
 344 
 345   // How many words of memory we "waste" because they cannot hold a filler object.
 346   size_t words_not_fillable = 0;
 347 
 348   if (word_fill_size >= min_fill_size()) {
 349     fill_with_objects(obj_top, word_fill_size);
 350   } else if (word_fill_size > 0) {
 351     // We have space to fill, but we cannot fit an object there.
 352     words_not_fillable = word_fill_size;
 353     word_fill_size = 0;
 354   }
 355 
 356   // We will set up the first region as "starts humongous". This
 357   // will also update the BOT covering all the regions to reflect
 358   // that there is a single object that starts at the bottom of the
 359   // first region.
 360   first_hr->set_starts_humongous(obj_top, word_fill_size);
 361   first_hr->set_allocation_context(context);
 362   // Then, if there are any, we will set up the "continues
 363   // humongous" regions.
 364   HeapRegion* hr = NULL;
 365   for (uint i = first + 1; i <= last; ++i) {
 366     hr = region_at(i);
 367     hr->set_continues_humongous(first_hr);
 368     hr->set_allocation_context(context);
 369   }
 370 
 371   // Up to this point no concurrent thread would have been able to
 372   // do any scanning on any region in this series. All the top
 373   // fields still point to bottom, so the intersection between
 374   // [bottom,top] and [card_start,card_end] will be empty. Before we
 375   // update the top fields, we'll do a storestore to make sure that
 376   // no thread sees the update to top before the zeroing of the
 377   // object header and the BOT initialization.
 378   OrderAccess::storestore();
 379 







 380   // Now, we will update the top fields of all the regions in the
 381   // series (the "starts humongous" one included) except the last one.
 382   for (uint i = first; i < last; ++i) {

 383     hr = region_at(i);
 384     hr->set_top(hr->end());



 385   }
 386 
 387   hr = region_at(last);
 388   // If we cannot fit a filler object, we must set top to the end
 389   // of the humongous object, otherwise we cannot iterate the heap
 390   // and the BOT will not be complete.
 391   hr->set_top(hr->end() - words_not_fillable);
 392 
 393   assert(hr->bottom() < obj_top && obj_top <= hr->end(),
 394          "obj_top should be in last region");
 395 
 396   check_bitmaps("Humongous Region Allocation", first_hr);
 397 
 398   assert(words_not_fillable == 0 ||
 399          first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
 400          "Miscalculation in humongous allocation");
 401 
 402   increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
 403 
 404   for (uint i = first; i <= last; ++i) {
 405     hr = region_at(i);
 406     _humongous_set.add(hr);
 407     if (i == first) {
 408       _hr_printer.alloc(G1HRPrinter::StartsHumongous, hr, hr->top());
 409     } else {
 410       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->top());
 411     }
 412   }
 413 
 414   return new_obj;
 415 }
 416 
 417 // If could fit into free regions w/o expansion, try.
 418 // Otherwise, if can expand, do so.
 419 // Otherwise, if using ex regions might help, try with ex given back.
 420 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 421   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 422 
 423   verify_region_sets_optional();
 424 
 425   uint first = G1_NO_HRM_INDEX;
 426   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 427 
 428   if (obj_regions == 1) {
 429     // Only one region to allocate, try to use a fast path by directly allocating
 430     // from the free lists. Do not try to expand here, we will potentially do that
 431     // later.


< prev index next >