// Open the dumped so file and, when provided, register the original (base)
// so file whose dynamic section will be used to repair the dump; then parse.
if (!elf_reader.setSource(source.c_str())) {
    FLOGE("unable to open source file");
    return false;
}
if (!baseso.empty()) {
    elf_reader.setBaseSoName(baseso.c_str());
}
if (!elf_reader.Load()) {
    FLOGE("source so file is invalid");
    return false;
}
boolObElfReader::Load(){ // try open if (!ReadElfHeader() || !VerifyElfHeader() || !ReadProgramHeader()) returnfalse; FixDumpSoPhdr();
bool has_base_dynamic_info = false; uint32_t base_dynamic_size = 0; if (!haveDynamicSectionInLoadableSegment()) { // try to get dynamic information from base so file. // TODO fix bug in dynamic section rebuild. LoadDynamicSectionFromBaseSource(); has_base_dynamic_info = dynamic_sections_ != nullptr; if (has_base_dynamic_info) { base_dynamic_size = dynamic_count_ * sizeof(Elf_Dyn); } } else { FLOGI("dynamic segment have been found in loadable segment, " "argument baseso will be ignored."); }
if (!ReserveAddressSpace(base_dynamic_size) || !LoadSegments() || !FindPhdr()) { returnfalse; } if (has_base_dynamic_info) { // Copy dynamic information to the end of the file. ApplyDynamicSection(); }
boolElfReader::ReadElfHeader(){ auto rc = source_->Read(&header_, sizeof(header_)); if (rc != sizeof(header_)) { FLOGE("\"%s\" is too small to be an ELF executable", name_); returnfalse; } returntrue; }
// Like the kernel, we only accept program header tables that // are smaller than 64KiB. if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf_Phdr)) { FLOGE("\"%s\" has invalid e_phnum: %zu", name_, phdr_num_); returnfalse; }
voidObElfReader::FixDumpSoPhdr(){ // some shell will release data between loadable phdr(s), just load all memory data if (dump_so_base_ != 0) { std::vector<Elf_Phdr*> loaded_phdrs; for (auto i = 0; i < phdr_num_; i++) { auto phdr = &phdr_table_[i]; if(phdr->p_type != PT_LOAD) continue; loaded_phdrs.push_back(phdr); } std::sort(loaded_phdrs.begin(), loaded_phdrs.end(), [](Elf_Phdr * first, Elf_Phdr * second) { return first->p_vaddr < second->p_vaddr; }); if (!loaded_phdrs.empty()) { for (unsignedlong i = 0, total = loaded_phdrs.size(); i < total; i++) { auto phdr = loaded_phdrs[i]; if (i != total - 1) { // to next loaded segament auto nphdr = loaded_phdrs[i+1]; phdr->p_memsz = nphdr->p_vaddr - phdr->p_vaddr; } else { // to the file end phdr->p_memsz = file_size - phdr->p_vaddr; } phdr->p_filesz = phdr->p_memsz; } } }
auto phdr = phdr_table_; for(auto i = 0; i < phdr_num_; i++) { phdr->p_paddr = phdr->p_vaddr; phdr->p_filesz = phdr->p_memsz; // expend filesize to memsiz phdr->p_offset = phdr->p_vaddr; // since elf has been loaded. just expand file data to dump memory data // phdr->p_flags = 0 // TODO fix flags by PT_TYPE phdr++; } }
boolElfReader::ReserveAddressSpace(uint32_t padding_size){ Elf_Addr min_vaddr; load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr); if (load_size_ == 0) { FLOGE("\"%s\" has no loadable segments", name_); returnfalse; } pad_size_ = padding_size;
uint32_t alloc_size = load_size_ + pad_size_;
uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr); // alloc map data, and load in addr uint8_t * start = newuint8_t[alloc_size]; memset(start, 0, alloc_size);
load_start_ = start; // the first loaded phdr data should be loaded in the start of load_start // (load_bias_ + phdr.vaddr), so load_bias_ = load_start - phdr.vaddr(min_addr) load_bias_ = reinterpret_cast<uint8_t *>(reinterpret_cast<uintptr_t >(start) - reinterpret_cast<uintptr_t >(addr)); returntrue; }
boolElfReader::LoadSegments(){ // TODO fix file dada load error, file data between LOAD seg should be loaded for (size_t i = 0; i < phdr_num_; ++i) { const Elf_Phdr* phdr = &phdr_table_[i];
// if the segment is writable, and does not end on a page boundary, // zero-fill it until the page limit. // if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) { // memset(seg_file_end + reinterpret_cast<uint8_t *>(load_bias_), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end)); // }
// seg_file_end = PAGE_END(seg_file_end);
// seg_file_end is now the first page address after the file // content. If seg_end is larger, we need to zero anything // between them. This is done by using a private anonymous // map for all extra pages. // since data has been clear, just skip this step // if (seg_page_end > seg_file_end) { // void* load_point = (uint8_t*)load_bias_ + seg_file_end; // memset(load_point, 0, seg_page_end - seg_file_end); // } } returntrue; }