art/runtime/interpreter/interpreter_switch_impl.cc
template<bool do_access_check, bool transaction_active>
void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
  ......
  switch (inst->Opcode(inst_data)) {
    case Instruction::NEW_INSTANCE: {
      PREAMBLE();
      ObjPtr<mirror::Object> obj = nullptr;
      ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
                                                       shadow_frame.GetMethod(),
                                                       self,
                                                       false,
                                                       do_access_check);
      if (LIKELY(c != nullptr)) {
        if (UNLIKELY(c->IsStringClass())) {
          gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
          obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
        } else {
          obj = AllocObjectFromCode<true>(
              c.Ptr(),
              self,
              Runtime::Current()->GetHeap()->GetCurrentAllocator());
        }
      }
      if (UNLIKELY(obj == nullptr)) {
        HANDLE_PENDING_EXCEPTION();
      } else {
        obj->GetClass()->AssertInitializedOrInitializingInThread(self);
        // Don't allow finalizable objects to be allocated during a transaction since these can't
        // be finalized without a started runtime.
        if (transaction_active && obj->GetClass()->IsFinalizable()) {
          AbortTransactionF(self, "Allocating finalizable object in transaction: %s",
                            obj->PrettyTypeOf().c_str());
          HANDLE_PENDING_EXCEPTION();
          break;
        }
        shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj.Ptr());
        inst = inst->Next_2xx();
      }
      break;
    }
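When the interpreter executes a new-instance instruction, it first resolves the target class through ResolveVerifyAndClinit, which also checks access (per do_access_check) and triggers class initialization if needed. java.lang.String is special-cased because a string's size depends on its contents, so an empty string is created through String::AllocEmptyString; every other class goes through AllocObjectFromCode with the heap's current allocator. A null result means an exception is pending. When a transaction is active (transactions are used by the compiler when it pre-initializes classes), finalizable objects are rejected outright, since finalizers cannot run without a started runtime. On success the new reference is stored into the destination register and the pc advances past the two-code-unit instruction.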
art/runtime/entrypoints/entrypoint_utils-inl.h
// Allocate an instance of klass. Throws InstantiationError if klass is not instantiable,
// or IllegalAccessError if klass is j.l.Class. Performs a clinit check too.
template <bool kInstrumented>
ALWAYS_INLINE
inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
                                           Thread* self,
                                           gc::AllocatorType allocator_type) {
  bool slow_path = false;
  klass = CheckObjectAlloc(klass, self, &slow_path);
  if (UNLIKELY(slow_path)) {
    if (klass == nullptr) {
      return nullptr;
    }
    // CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
    return klass->Alloc</*kInstrumented*/true>(
        self,
        Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
  }
  DCHECK(klass != nullptr);
  return klass->Alloc<kInstrumented>(self, allocator_type).Ptr();
}
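Note the two exits here. On the fast path the allocator_type the caller captured is still valid. But if CheckObjectAlloc took the slow path, class initialization may have suspended the thread, so both the current allocator and the instrumentation state may have changed in the meantime; the code therefore re-reads GetCurrentAllocator() and pins kInstrumented to true.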
ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(mirror::Class* klass,
                                                     Thread* self,
                                                     bool* slow_path) {
  if (UNLIKELY(!klass->IsInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_klass(hs.NewHandle(klass));
    // EnsureInitialized (the class initializer) might cause a GC, which
    // may cause us to suspend, meaning that another thread may try to
    // change the allocator while we are stuck in the entrypoints of
    // an old allocator. Also, the class initialization may fail. To
    // handle these cases we mark the slow path boolean as true so
    // that the caller knows to check the allocator type to see if it
    // has changed and to null-check the return value in case the
    // initialization fails.
    *slow_path = true;
    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
      DCHECK(self->IsExceptionPending());
      return nullptr;  // Failure
    } else {
      DCHECK(!self->IsExceptionPending());
    }
    return h_klass.Get();
  }
  return klass;
}
art/runtime/gc/heap.h
AllocatorType GetCurrentAllocator() const {
  return current_allocator_;
}
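current_allocator_ is not a constant: the heap updates it (together with the allocation entrypoints) whenever the collector type changes, for example during a foreground/background heap transition or a homogeneous space compaction. That is why every allocation path that might suspend re-reads it instead of caching it.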
art/runtime/mirror/class-inl.h
template<bool kIsInstrumented, bool kCheckAddFinalizer>
inline ObjPtr<Object> Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
  CheckObjectAlloc();
  gc::Heap* heap = Runtime::Current()->GetHeap();
  const bool add_finalizer = kCheckAddFinalizer && IsFinalizable();
  if (!kCheckAddFinalizer) {
    DCHECK(!IsFinalizable());
  }
  // Note that the this pointer may be invalidated after the allocation.
  ObjPtr<Object> obj =
      heap->AllocObjectWithAllocator<kIsInstrumented, false>(self,
                                                             this,
                                                             this->object_size_,
                                                             allocator_type,
                                                             VoidFunctor());
  if (add_finalizer && LIKELY(obj != nullptr)) {
    heap->AddFinalizerReference(self, &obj);
    if (UNLIKELY(self->IsExceptionPending())) {
      // Failed to allocate finalizer reference, it means that the whole allocation failed.
      obj = nullptr;
    }
  }
  return obj.Ptr();
}
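Two details are worth calling out. The comment about the this pointer: the allocation can trigger a GC, and a moving collector may relocate the Class object itself, which is why everything the method needs from this (IsFinalizable(), object_size_) is read before the call and only obj is used afterwards. And finalizable classes get a finalizer reference enqueued via AddFinalizerReference right after allocation; if that bookkeeping allocation fails, the whole allocation is treated as failed.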
art/runtime/gc/heap-inl.h
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      ObjPtr<mirror::Class> klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  ......
  obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                            &usable_size, &bytes_tl_bulk_allocated);
  ......
}

template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  ......
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self,
                                     alloc_size,
                                     bytes_allocated,
                                     usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeTLAB:
      FALLTHROUGH_INTENDED;
    case kAllocatorTypeRegionTLAB: {
      DCHECK_ALIGNED(alloc_size, kObjectAlignment);
      static_assert(space::RegionSpace::kAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      static_assert(kObjectAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // kAllocatorTypeTLAB may be the allocator for region space TLAB if the GC is not marking,
        // that is why the allocator is not passed down.
        return AllocWithNewTLAB(self,
                                alloc_size,
                                kGrow,
                                bytes_allocated,
                                usable_size,
                                bytes_tl_bulk_allocated);
      }
      ......
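TryToAllocate is where the allocator types finally diverge, and the fast cases are little more than a pointer bump: the bump-pointer space advances a shared cursor, while the TLAB cases bump a purely thread-local cursor and only synchronize when the buffer must be refilled via AllocWithNewTLAB. To make the fast path concrete, here is a minimal sketch of a thread-safe bump-pointer allocator; it is not the ART implementation, and the names (BumpRegion, pos, limit) are invented for illustration:

#include <atomic>
#include <cstddef>
#include <cstdint>

struct BumpRegion {
  std::atomic<uint8_t*> pos;  // next free byte in the region
  uint8_t* limit;             // first byte past the end of the region

  // Reserve num_bytes by atomically advancing the cursor. Returns nullptr
  // when the region is exhausted; a real runtime would then fall back to a
  // slow path (grab a new region, trigger a GC, ...).
  void* Alloc(size_t num_bytes) {
    uint8_t* old_pos = pos.fetch_add(num_bytes, std::memory_order_relaxed);
    if (old_pos > limit || static_cast<size_t>(limit - old_pos) < num_bytes) {
      pos.fetch_sub(num_bytes, std::memory_order_relaxed);  // undo our claim
      return nullptr;
    }
    return old_pos;  // the caller installs the class pointer into the header
  }
};

The TLAB variant needs no atomics at all because the buffer belongs to a single thread, which is what makes it the cheapest allocator here.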
libcore/ojluni/src/main/native/Runtime.c
JNIEXPORT jlong JNICALL
Runtime_freeMemory(JNIEnv *env, jobject this)
{
    return JVM_FreeMemory();
}
art/openjdkjvm/OpenjdkJvm.cc
JNIEXPORT jlong JVM_FreeMemory(void) {
  return art::Runtime::Current()->GetHeap()->GetFreeMemory();
}
art/runtime/gc/heap.h
// Returns how much free memory we have until we need to grow the heap to perform an allocation.
// Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
size_t GetFreeMemory() const {
  size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
  size_t total_memory = GetTotalMemory();
  // Make sure we don't get a negative number.
  return total_memory - std::min(total_memory, byte_allocated);
}
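So Runtime.freeMemory() bottoms out in simple heap bookkeeping: the JNI stub calls JVM_FreeMemory, which asks the Heap for GetTotalMemory() minus the current num_bytes_allocated_. The std::min is just clamping: with, say, 64 MiB of total memory and 20 MiB allocated the result is 44 MiB, and if the allocated counter momentarily exceeds the total (the two values are read without any common lock), the result is clamped to 0 instead of underflowing the unsigned subtraction.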
art/runtime/class_linker.cc
mirror::Class* ClassLinker::DefineClass(Thread* self,
                                        const char* descriptor,
                                        size_t hash,
                                        Handle<mirror::ClassLoader> class_loader,
                                        const DexFile& dex_file,
                                        const DexFile::ClassDef& dex_class_def) {
  StackHandleScope<3> hs(self);
  auto klass = hs.NewHandle<mirror::Class>(nullptr);
  // Load the class from the dex file.
  ......
  if (klass == nullptr) {
    // Allocate a class with the status of not ready.
    // Interface object should get the right size here. Regular class will
    // figure out the right size later and be replaced with one of the right
    // size when the class becomes resolved.
    klass.Assign(AllocClass(self, SizeOfClassWithoutEmbeddedTables(dex_file, dex_class_def)));
  }
  ......
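Classes are heap objects too: before any instance can be allocated, the ClassLinker has to allocate the mirror::Class itself (an instance of java.lang.Class), and its size is computed up front from the field definitions in the dex file.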
uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
                                                       const DexFile::ClassDef& dex_class_def) {
  const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
  ......
  // We allow duplicate definitions of the same field in a class_data_item
  // but ignore the repeated indexes here, b/21868015.
  uint32_t last_field_idx = dex::kDexNoIndex;
  for (ClassDataItemIterator it(dex_file, class_data); it.HasNextStaticField(); it.Next()) {
    uint32_t field_idx = it.GetMemberIndex();
    // Ordering enforced by DexFileVerifier.
    DCHECK(last_field_idx == dex::kDexNoIndex || last_field_idx <= field_idx);
    if (UNLIKELY(field_idx == last_field_idx)) {
      continue;
    }
    last_field_idx = field_idx;
    const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
    const char* descriptor = dex_file.GetFieldTypeDescriptor(field_id);
    char c = descriptor[0];
    switch (c) {
      case 'L':
      case '[':
        num_ref++;
        break;
      case 'J':
      case 'D':
        num_64++;
        break;
      case 'I':
      case 'F':
        num_32++;
        break;
      case 'S':
      case 'C':
        num_16++;
        break;
      case 'B':
      case 'Z':
        num_8++;
        break;
      default:
        LOG(FATAL) << "Unknown descriptor: " << c;
        UNREACHABLE();
    }
  }
  return mirror::Class::ComputeClassSize(false,
                                         0,
                                         num_8,
                                         num_16,
                                         num_32,
                                         num_64,
                                         num_ref,
                                         image_pointer_size_);
}
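Only static fields are counted here, because instance fields live in the instances themselves while static fields are stored in the Class object; each field is bucketed by the first character of its dex type descriptor ('L' and '[' are references, 'J'/'D' are 64-bit, 'I'/'F' 32-bit, 'S'/'C' 16-bit, 'B'/'Z' 8-bit). ComputeClassSize then turns those per-width counts into a byte size. The following is a rough, hypothetical sketch of that arithmetic, not the real function (which also accounts for embedded vtables and uses ART's actual reference width and alignment rules):

#include <cstddef>
#include <cstdint>

// Hypothetical illustration, not mirror::Class::ComputeClassSize: lay out
// field groups so alignment padding stays small, then round the total up
// to the object alignment.
size_t SketchClassSize(size_t header_size, uint32_t num_ref, uint32_t num_64,
                       uint32_t num_32, uint32_t num_16, uint32_t num_8) {
  size_t size = header_size;
  size += num_ref * 4u;              // assumption: 32-bit heap references
  size = (size + 7u) & ~size_t{7u};  // 8-byte align before 64-bit fields
  size += num_64 * 8u;
  size += num_32 * 4u;
  size += num_16 * 2u;
  size += num_8 * 1u;
  return (size + 7u) & ~size_t{7u};  // round up to 8-byte object alignment
}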
mirror::Class* ClassLinker::AllocClass(Thread* self, uint32_t class_size) {
  return AllocClass(self, GetClassRoot(kJavaLangClass), class_size);
}

mirror::Class* ClassLinker::AllocClass(Thread* self,
                                       ObjPtr<mirror::Class> java_lang_Class,
                                       uint32_t class_size) {
  DCHECK_GE(class_size, sizeof(mirror::Class));
  gc::Heap* heap = Runtime::Current()->GetHeap();
  mirror::Class::InitializeClassVisitor visitor(class_size);
  ObjPtr<mirror::Object> k = kMovingClasses ?
      heap->AllocObject<true>(self, java_lang_Class, class_size, visitor) :
      heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor);
  if (UNLIKELY(k == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  return k->AsClass();
}
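AllocClass makes the placement decision explicit: when kMovingClasses is false, Class objects are allocated with AllocNonMovableObject so a moving collector never relocates them; otherwise they are ordinary movable objects. A null result here can only mean an out-of-memory situation, hence the AssertPendingOOMException.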
art/runtime/gc/heap.h
// Allocates and initializes storage for an object instance.
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocObject(Thread* self,
                            ObjPtr<mirror::Class> klass,
                            size_t num_bytes,
                            const PreFenceVisitor& pre_fence_visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(!*gc_complete_lock_,
             !*pending_task_lock_,
             !*backtrace_lock_,
             !Roles::uninterruptible_) {
  return AllocObjectWithAllocator<kInstrumented, true>(self,
                                                       klass,
                                                       num_bytes,
                                                       GetCurrentAllocator(),
                                                       pre_fence_visitor);
}
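AllocObject is a thin wrapper: it forwards to AllocObjectWithAllocator with the current allocator and kCheckLargeObject = true. Compare this with Class::Alloc above, which passed false for that parameter: in ART only strings and primitive arrays are ever placed in the large object space, so a plain instance allocation can skip the large-object check.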
art/runtime/gc/allocator_type.h
// Different types of allocators.
enum AllocatorType {
  kAllocatorTypeBumpPointer,  // Use BumpPointer allocator, has entrypoints.
  kAllocatorTypeTLAB,         // Use TLAB allocator, has entrypoints.
  kAllocatorTypeRosAlloc,     // Use RosAlloc allocator, has entrypoints.
  kAllocatorTypeDlMalloc,     // Use dlmalloc allocator, has entrypoints.
  kAllocatorTypeNonMoving,    // Special allocator for non moving objects, doesn't have entrypoints.
  kAllocatorTypeLOS,          // Large object space, also doesn't have entrypoints.
  kAllocatorTypeRegion,
  kAllocatorTypeRegionTLAB,
};
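The "has entrypoints" remarks above refer to per-allocator fast-path stubs that compiled code calls directly, bypassing the interpreter path we started from. They are stamped out once per allocator by a macro, and the runtime installs the set that matches current_allocator_:
art/runtime/entrypoints/quick/quick_alloc_entrypoints.cc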
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(DlMalloc, gc::kAllocatorTypeDlMalloc)
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(RosAlloc, gc::kAllocatorTypeRosAlloc)
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(BumpPointer, gc::kAllocatorTypeBumpPointer)
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(TLAB, gc::kAllocatorTypeTLAB)
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(Region, gc::kAllocatorTypeRegion)
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(RegionTLAB, gc::kAllocatorTypeRegionTLAB)