src/share/vm/memory/cardTableModRefBS.cpp

Sdiff for changeset hs25_8011661


Old:

  99                        _page_size, heap_rs.base(), heap_rs.size());
 100   if (!heap_rs.is_reserved()) {
 101     vm_exit_during_initialization("Could not reserve enough space for the "
 102                                   "card marking array");
 103   }
 104 
 105   // The assembler store_check code will do an unsigned shift of the oop,
 106   // then add it to byte_map_base, i.e.
 107   //
 108   //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
 109   _byte_map = (jbyte*) heap_rs.base();
 110   byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
 111   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
 112   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
 113 
 114   jbyte* guard_card = &_byte_map[_guard_index];
 115   uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
 116   _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
 117   if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
 118     // Do better than this for Merlin
 119     vm_exit_out_of_memory(_page_size, "card table last card");
 120   }
 121 
 122   *guard_card = last_card;
 123 
 124   _lowest_non_clean =
 125     NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
 126   _lowest_non_clean_chunk_size =
 127     NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
 128   _lowest_non_clean_base_chunk_index =
 129     NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
 130   _last_LNC_resizing_collection =
 131     NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
 132   if (_lowest_non_clean == NULL
 133       || _lowest_non_clean_chunk_size == NULL
 134       || _lowest_non_clean_base_chunk_index == NULL
 135       || _last_LNC_resizing_collection == NULL)
 136     vm_exit_during_initialization("couldn't allocate an LNC array.");
 137   for (i = 0; i < max_covered_regions; i++) {
 138     _lowest_non_clean[i] = NULL;
 139     _lowest_non_clean_chunk_size[i] = 0;


 275     // "guarded" is used for assertion checking below and recalls the fact
 276     // that the would-be end of the new committed region would have
 277     // penetrated the guard page.
 278     HeapWord* new_end_for_commit = new_end_aligned;
 279 
 280     DEBUG_ONLY(bool guarded = false;)
 281     if (new_end_for_commit > _guard_region.start()) {
 282       new_end_for_commit = _guard_region.start();
 283       DEBUG_ONLY(guarded = true;)
 284     }
 285 
 286     if (new_end_for_commit > cur_committed.end()) {
 287       // Must commit new pages.
 288       MemRegion const new_committed =
 289         MemRegion(cur_committed.end(), new_end_for_commit);
 290 
 291       assert(!new_committed.is_empty(), "Region should not be empty here");
 292       if (!os::commit_memory((char*)new_committed.start(),
 293                              new_committed.byte_size(), _page_size)) {
 294         // Do better than this for Merlin
 295         vm_exit_out_of_memory(new_committed.byte_size(),
 296                 "card table expansion");
 297       }
 298     // Use new_end_aligned (as opposed to new_end_for_commit) because
 299     // the cur_committed region may include the guard region.
 300     } else if (new_end_aligned < cur_committed.end()) {
 301       // Must uncommit pages.
 302       MemRegion const uncommit_region =
 303         committed_unique_to_self(ind, MemRegion(new_end_aligned,
 304                                                 cur_committed.end()));
 305       if (!uncommit_region.is_empty()) {
 306         // It is not safe to uncommit cards if the boundary between
 307         // the generations is moving.  A shrink can uncommit cards
 308         // owned by generation A but being used by generation B.
 309         if (!UseAdaptiveGCBoundary) {
 310           if (!os::uncommit_memory((char*)uncommit_region.start(),
 311                                    uncommit_region.byte_size())) {
 312             assert(false, "Card table contraction failed");
 313             // The call failed so don't change the end of the
 314             // committed region.  This is better than taking the
 315             // VM down.


New:

  99                        _page_size, heap_rs.base(), heap_rs.size());
 100   if (!heap_rs.is_reserved()) {
 101     vm_exit_during_initialization("Could not reserve enough space for the "
 102                                   "card marking array");
 103   }
 104 
 105   // The assembler store_check code will do an unsigned shift of the oop,
 106   // then add it to byte_map_base, i.e.
 107   //
 108   //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
 109   _byte_map = (jbyte*) heap_rs.base();
 110   byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
 111   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
 112   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
 113 
 114   jbyte* guard_card = &_byte_map[_guard_index];
 115   uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
 116   _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
 117   if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
 118     // Do better than this for Merlin
 119     vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card");
 120   }
 121 
 122   *guard_card = last_card;
 123 
 124   _lowest_non_clean =
 125     NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
 126   _lowest_non_clean_chunk_size =
 127     NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
 128   _lowest_non_clean_base_chunk_index =
 129     NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
 130   _last_LNC_resizing_collection =
 131     NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
 132   if (_lowest_non_clean == NULL
 133       || _lowest_non_clean_chunk_size == NULL
 134       || _lowest_non_clean_base_chunk_index == NULL
 135       || _last_LNC_resizing_collection == NULL)
 136     vm_exit_during_initialization("couldn't allocate an LNC array.");
 137   for (i = 0; i < max_covered_regions; i++) {
 138     _lowest_non_clean[i] = NULL;
 139     _lowest_non_clean_chunk_size[i] = 0;
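
The comment above the byte_map_base assignment is the invariant this hunk
sets up: byte_map_base is pre-biased so the generated store_check barrier
can find a card with a single shift-and-add, and the two asserts pin that
mapping to the covered bounds. A minimal standalone C++ sketch of the
arithmetic (card_shift = 9 and dirty_card = 0 are assumptions mirroring
HotSpot's defaults, not taken from this file):

    #include <cstdint>

    const int     card_shift = 9;   // assumed: 512-byte cards
    const uint8_t dirty_card = 0;   // assumed dirty-card value

    struct CardTableSketch {
      uint8_t*  byte_map;        // start of the card array (heap_rs.base())
      uint8_t*  byte_map_base;   // byte_map - (low_bound >> card_shift)

      // The mapping the asserts check: byte_for(low_bound) == &byte_map[0].
      uint8_t* byte_for(const void* p) const {
        return byte_map_base + (uintptr_t(p) >> card_shift);
      }

      // The store_check barrier: dirty the card covering the stored-to field.
      void store_check(void* field) { *byte_for(field) = dirty_card; }
    };

The guard card set up just above is the card past _last_valid_index; its page
is committed separately (align_size_down to a page boundary) so that writes to
the very end of the map always land on committed memory.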


 275     // "guarded" is used for assertion checking below and recalls the fact
 276     // that the would-be end of the new committed region would have
 277     // penetrated the guard page.
 278     HeapWord* new_end_for_commit = new_end_aligned;
 279 
 280     DEBUG_ONLY(bool guarded = false;)
 281     if (new_end_for_commit > _guard_region.start()) {
 282       new_end_for_commit = _guard_region.start();
 283       DEBUG_ONLY(guarded = true;)
 284     }
 285 
 286     if (new_end_for_commit > cur_committed.end()) {
 287       // Must commit new pages.
 288       MemRegion const new_committed =
 289         MemRegion(cur_committed.end(), new_end_for_commit);
 290 
 291       assert(!new_committed.is_empty(), "Region should not be empty here");
 292       if (!os::commit_memory((char*)new_committed.start(),
 293                              new_committed.byte_size(), _page_size)) {
 294         // Do better than this for Merlin
 295         vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
 296                 "card table expansion");
 297       }
 298     // Use new_end_aligned (as opposed to new_end_for_commit) because
 299     // the cur_committed region may include the guard region.
 300     } else if (new_end_aligned < cur_committed.end()) {
 301       // Must uncommit pages.
 302       MemRegion const uncommit_region =
 303         committed_unique_to_self(ind, MemRegion(new_end_aligned,
 304                                                 cur_committed.end()));
 305       if (!uncommit_region.is_empty()) {
 306         // It is not safe to uncommit cards if the boundary between
 307         // the generations is moving.  A shrink can uncommit cards
 308         // owned by generation A but being used by generation B.
 309         if (!UseAdaptiveGCBoundary) {
 310           if (!os::uncommit_memory((char*)uncommit_region.start(),
 311                                    uncommit_region.byte_size())) {
 312             assert(false, "Card table contraction failed");
 313             // The call failed so don't change the end of the
 314             // committed region.  This is better than taking the
 315             // VM down.
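
The branch structure above is the whole resize protocol: clamp the desired end
at the guard page, commit the delta when growing, and uncommit the tail when
shrinking, except under UseAdaptiveGCBoundary, where a moving generation
boundary could uncommit cards another generation still uses. A simplified
standalone C++ sketch (commit/uncommit are hypothetical stand-ins for
os::commit_memory/os::uncommit_memory):

    #include <cstddef>

    // Illustrative stubs; real code calls os::commit_memory /
    // os::uncommit_memory and must handle their failure.
    static bool commit(char*, size_t)   { return true; }
    static bool uncommit(char*, size_t) { return true; }

    // Returns the committed end of the card table after a resize request.
    //   cur_end     - current committed end
    //   new_end     - page-aligned end required by the new heap size
    //   guard_start - start of the guard page; never commit past it
    static char* resize_committed_end(char* cur_end, char* new_end,
                                      char* guard_start,
                                      bool adaptive_boundary) {
      // Clamp so a grow can never penetrate the guard page.
      char* new_end_for_commit = (new_end > guard_start) ? guard_start : new_end;

      if (new_end_for_commit > cur_end) {
        // Growing: commit only the delta [cur_end, new_end_for_commit).
        if (!commit(cur_end, (size_t)(new_end_for_commit - cur_end))) {
          return cur_end;          // HotSpot exits the VM here instead
        }
        return new_end_for_commit;
      } else if (new_end < cur_end && !adaptive_boundary) {
        // Shrinking: uncommit the tail; on failure keep the old end
        // rather than take the VM down, as the comment above says.
        if (!uncommit(new_end, (size_t)(cur_end - new_end))) {
          return cur_end;
        }
        return new_end;
      }
      return cur_end;              // no change needed
    }

On the failing-commit path, the call this change touches now carries an error
type: as of this changeset vm_exit_out_of_memory distinguishes mmap-style
commit failures from malloc failures. The post-change declaration in
utilities/debug.hpp is, roughly (reproduced from memory, worth double-checking):

    void vm_exit_out_of_memory(size_t size, VMErrorType vm_err_type,
                               const char* message);

OOM_MMAP_ERROR is the right choice at both call sites here because each
failure comes from committing mapped memory, not from malloc.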