Merge branch 'GP-3328_isabella3412_cramfs'

This commit is contained in:
Ryan Kurtz 2023-05-25 07:21:01 -04:00
commit 7dfaa2ccc3
11 changed files with 1362 additions and 0 deletions

View file

@@ -0,0 +1,117 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import ghidra.app.services.AbstractAnalyzer;
import ghidra.app.services.AnalyzerType;
import ghidra.app.util.bin.*;
import ghidra.app.util.importer.MessageLog;
import ghidra.app.util.opinion.BinaryLoader;
import ghidra.framework.options.Options;
import ghidra.program.model.address.Address;
import ghidra.program.model.address.AddressSetView;
import ghidra.program.model.data.DataType;
import ghidra.program.model.lang.Language;
import ghidra.program.model.lang.Processor;
import ghidra.program.model.listing.CodeUnit;
import ghidra.program.model.listing.Program;
import ghidra.program.model.mem.MemoryAccessException;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
/**
 * Analyzer that annotates a CramFS image imported as a raw binary: it lays
 * down data types and plate comments for the superblock and for each inode
 * that follows the root directory entry.
 */
public class CramFsAnalyzer extends AbstractAnalyzer {

	// NOTE(review): a previous comment here described a map of "seen offsets"
	// used to detect inodes sharing data space; no such map exists in this class.

	/**
	 * Registers this analyzer as a byte-level analyzer named "CramFS Analyzer".
	 */
	public CramFsAnalyzer() {
		super("CramFS Analyzer", "Annotates CramFS binaries", AnalyzerType.BYTE_ANALYZER);
	}

	@Override
	public boolean getDefaultEnablement(Program program) {
		// On by default; canAnalyze() below is the real gate.
		return true;
	}

	@Override
	public boolean canAnalyze(Program program) {
		try {
			// Only programs imported with the raw binary loader are candidates.
			Options options = program.getOptions(Program.PROGRAM_INFO);
			String format = options.getString("Executable Format", null);
			if (!BinaryLoader.BINARY_NAME.equals(format)) {
				return false;
			}
			Language language = program.getLanguage();
			// NOTE(review): only the little-endian DATA language is rejected here;
			// big-endian DATA and every non-DATA language fall through to the
			// magic check — confirm this is the intended endianness gate.
			if (language.getProcessor() == Processor.findOrPossiblyCreateProcessor("DATA") &&
				!language.isBigEndian()) {
				return false;
			}
			// The 4-byte cramfs magic sits at the very start of the image.
			int magic = program.getMemory().getInt(program.getMinAddress());
			return magic == CramFsConstants.MAGIC;
		}
		catch (MemoryAccessException e) {
			// Program too small to hold a magic value: simply not a CramFS image.
		}
		return false;
	}

	/**
	 * Marks up the CramFS superblock and every inode after the root: creates
	 * the corresponding data in the listing and attaches plate comments.
	 * @param program the program being analyzed.
	 * @param set the addresses that triggered analysis (unused; markup always
	 * starts at the program's minimum address).
	 * @param monitor cancellation monitor, checked once per inode.
	 * @param log receives any exception raised during markup.
	 * @return true on success, false if any markup step failed.
	 * @throws CancelledException if the user cancels analysis.
	 */
	@Override
	public boolean added(Program program, AddressSetView set, TaskMonitor monitor, MessageLog log)
			throws CancelledException {
		Address minAddress = program.getMinAddress();
		boolean isLE = !program.getLanguage().isBigEndian();
		try (ByteProvider provider = new MemoryByteProvider(program.getMemory(), minAddress)) {
			BinaryReader reader = new BinaryReader(provider, isLE);
			CramFsSuper cramFsSuper = new CramFsSuper(reader);
			DataType dataType = cramFsSuper.toDataType();
			program.getListing().createData(minAddress, dataType);
			program.getListing()
					.setComment(minAddress, CodeUnit.PLATE_COMMENT,
						cramFsSuper.getRoot().toString());
			// Inodes are laid out back to back after the root; the root inode was
			// annotated with the superblock above, hence files - 1 iterations.
			int offset = cramFsSuper.getRoot().getOffsetAdjusted();
			for (int i = 0; i < cramFsSuper.getFsid().getFiles() - 1; i++) {
				monitor.checkCancelled();
				reader.setPointerIndex(offset);
				Address inodeAddress = minAddress.add(offset);
				CramFsInode newInode = new CramFsInode(reader);
				if (newInode.isFile()) {
					// Also label the file's data area, which the inode points at.
					Address inodeDataAddress = minAddress.add(newInode.getOffsetAdjusted());
					program.getListing()
							.setComment(inodeDataAddress, CodeUnit.PLATE_COMMENT,
								newInode.getName() + " Data/Bytes\n");
				}
				DataType inodeDataType = newInode.toDataType();
				program.getListing().createData(inodeAddress, inodeDataType);
				program.getListing()
						.setComment(inodeAddress, CodeUnit.PLATE_COMMENT,
							newInode.getName() + "\n" + newInode.toString());
				// Advance to the next inode by this inode's structure length
				// (header plus padded name).
				offset += inodeDataType.getLength();
			}
		}
		catch (Exception e) {
			log.appendException(e);
			return false;
		}
		return true;
	}
}

View file

@@ -0,0 +1,92 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import java.io.IOException;
import ghidra.app.util.bin.ByteProvider;
/**
 * One contiguous data block inside a cramfs image, identified by its start
 * address and size within the backing {@link ByteProvider}.
 * @see <a href="https://github.com/torvalds/linux/tree/master/fs/cramfs">/fs/cramfs</a>
 */
public class CramFsBlock {

	static final int IS_DIRECT_POINTER = (1 << 30);
	static final int IS_UNCOMPRESSED = (1 << 31);

	private int blockPointer;
	private int startAddress;
	private boolean isDirectPointer;
	private boolean isCompressed;
	private int blockSize;
	private ByteProvider provider;

	/**
	 * Builds a regular contiguous cramfs block, i.e. one belonging to an image
	 * whose extension flag is not set.
	 * @param start the address for the start of this block.
	 * @param blockSize the size of the cramfs block.
	 * @param provider the byteProvider for the block header.
	 */
	public CramFsBlock(int start, int blockSize, ByteProvider provider) {
		this.startAddress = start;
		this.blockPointer = start;
		this.blockSize = blockSize;
		this.provider = provider;
		// Plain blocks carry no direct-pointer or compression markers.
		this.isDirectPointer = false;
		this.isCompressed = false;
	}

	/**
	 * Returns the block pointer for the cramfs block.
	 * @return the block pointer for the cramfs block.
	 */
	public int getBlockPointer() {
		return blockPointer;
	}

	/**
	 * Returns true if the block is a direct pointer.
	 * @return true if the block is a direct pointer.
	 */
	public boolean isDirectPointer() {
		return isDirectPointer;
	}

	/**
	 * Returns true if the block is compressed.
	 * @return true if the block is compressed.
	 */
	public boolean isCompressed() {
		return isCompressed;
	}

	/**
	 * Returns the size of the cramfs block.
	 * @return the size of the cramfs block.
	 */
	public int getBlockSize() {
		return blockSize;
	}

	/**
	 * Reads this block's bytes in one shot from the underlying provider.
	 * @return the read bytes in a byte array.
	 * @throws IOException if there is an error while reading the data block.
	 */
	public byte[] readBlock() throws IOException {
		return provider.readBytes(startAddress, blockSize);
	}
}

View file

@@ -0,0 +1,94 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import java.util.ArrayList;
import java.util.List;
import ghidra.app.util.bin.ByteProvider;
/**
 * Builds the list of {@link CramFsBlock}s backing one inode's data, using the
 * inode's block pointer list to derive each compressed block's size.
 */
public class CramFsBlockFactory {

	// NOTE(review): these duplicate the constants in CramFsBlock and are unused
	// here; kept because they are package-visible and may be referenced elsewhere.
	static final int IS_DIRECT_POINTER = (1 << 30);
	static final int IS_UNCOMPRESSED = (1 << 31);

	private CramFsInode cramfsInode;
	private ByteProvider provider;
	private List<Integer> blockPointerList;
	// Sizes (bytes) of the compressed blocks, derived from consecutive
	// block-pointer differences.
	private List<Integer> blockSizes;
	// True when CRAMFS_FLAG_EXT_BLOCK_POINTERS is set in the superblock, which
	// puts special conditions on the data blocks. May always be false in
	// practice, but is honored just in case.
	private boolean blockPointerExtensionsEnabled;

	/**
	 * This class takes an iNode and produces a List of CramFsBlocks that are
	 * set appropriately depending on their flags,
	 * and the flag CRAMFS_FLAG_EXT_BLOCK_POINTERS from the CramFsSuper block.
	 * @param cramfsInode the parent node for this block.
	 * @param provider the byteProvider for the block header.
	 * @param blockPointerList a list of the block pointers.
	 * @param blockPointerExtensionsEnabled true if the block pointer extensions are enabled.
	 */
	public CramFsBlockFactory(CramFsInode cramfsInode, ByteProvider provider,
			List<Integer> blockPointerList, boolean blockPointerExtensionsEnabled) {
		this.cramfsInode = cramfsInode;
		this.provider = provider;
		this.blockPointerExtensionsEnabled = blockPointerExtensionsEnabled;
		this.blockPointerList = blockPointerList;
	}

	/**
	 * Produces the block list for this factory's inode. If the block pointer
	 * extension flag is not set in the super block, the size of each zlibbed
	 * block is the difference between consecutive block pointers and one block
	 * is created per size; if the flag is set, an empty list is returned
	 * (extended block pointers are currently unsupported).
	 * @return the block list (possibly empty).
	 */
	public List<CramFsBlock> produceBlocks() {
		List<CramFsBlock> blockList = new ArrayList<>();
		if (!blockPointerExtensionsEnabled) {
			blockSizes = calculateCompressedBlockSizes();
			//Use blockSizes to create Blocks.
			for (int i = 0; i < blockSizes.size(); i++) {
				blockList.add(
					new CramFsBlock(blockPointerList.get(i), blockSizes.get(i), provider));
			}
		}
		return blockList;
	}

	/**
	 * Returns the cramfsInode.
	 * @return the cramfsInode.
	 */
	public CramFsInode getCramfsInode() {
		return cramfsInode;
	}

	// Each pointer marks a block boundary, so size i = pointer[i+1] - pointer[i].
	private List<Integer> calculateCompressedBlockSizes() {
		List<Integer> compressedBlockSizes = new ArrayList<>();
		for (int i = 0; i < blockPointerList.size() - 1; i++) {
			compressedBlockSizes.add(blockPointerList.get(i + 1) - blockPointerList.get(i));
		}
		return compressedBlockSizes;
	}
}

View file

@@ -0,0 +1,177 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import ghidra.app.util.bin.ByteProvider;
import ghidra.file.formats.zlib.ZLIB;
/**
 * Reads the block pointer table that precedes an inode's compressed data and
 * exposes the individual compressed blocks, either raw or zlib-decompressed.
 */
public class CramFsBlockReader {

	private ByteProvider provider;
	private CramFsInode cramfsInode;
	// Entry 0 is the address of the first compressed block (located by scanning
	// for a zlib header); the remaining entries are the raw table pointers,
	// each of which marks the end of a block. Sizes are consecutive differences.
	private List<Integer> blockPointerTable = new LinkedList<>();
	private List<Integer> compressedBlockSizes = new ArrayList<>();
	private boolean isLittleEndian;

	/**
	 * This constructor reads the CramFS block pointer table for an inode and
	 * precomputes the compressed size of each block.
	 * @param provider the byteProvider for the block header.
	 * @param cramfsInode the parent node for this block.
	 * @param isLittleEndian if the block is little endian or not.
	 * @throws IOException if there is an error while reading the block.
	 */
	public CramFsBlockReader(ByteProvider provider, CramFsInode cramfsInode, boolean isLittleEndian)
			throws IOException {
		this.provider = provider;
		this.cramfsInode = cramfsInode;
		this.isLittleEndian = isLittleEndian;
		populateBlockPointerTable();
		calculateCompressedBlockSizes();
	}

	// Reads the raw pointer entries that sit between the inode's data offset and
	// the first compressed block. calculateStartAddress() has already appended
	// that first block address to the table, so only numBlockPointers - 1 raw
	// entries are read here.
	private void populateBlockPointerTable() throws IOException {
		int numBlockPointers = calculateStartAddress();
		if (numBlockPointers < 0) {
			throw new IOException("Start Address for data block not found");
		}
		int inodeDataOffset = cramfsInode.getOffsetAdjusted();
		for (int i = 0; i < numBlockPointers - 1; i++) {
			byte[] tempBuffer =
				provider.readBytes(inodeDataOffset, CramFsConstants.BLOCK_POINTER_SIZE);
			//byteProvider will be Big Endian by default
			ByteBuffer byteBuffer = ByteBuffer.wrap(tempBuffer);
			if (isLittleEndian) {
				blockPointerTable.add(Integer.reverseBytes(byteBuffer.getInt()));
			}
			else {
				blockPointerTable.add(byteBuffer.getInt());
			}
			inodeDataOffset += CramFsConstants.BLOCK_POINTER_SIZE;
		}
	}

	/**
	 * Calculates the start address of the data block using the
	 * block pointer table that precedes compressed data, by scanning forward in
	 * 4-byte steps until a zlib stream header is recognized.
	 * NOTE(review): this is a heuristic — a raw pointer whose bytes happen to
	 * match a zlib header would be misidentified; the loop is unbounded and
	 * relies on the provider throwing IOException at end of data.
	 * @return the number of block pointers associated with this data section.
	 * @throws IOException if error occurs reading from the byte provider.
	 */
	private int calculateStartAddress() throws IOException {
		int numBlockPointers = -1;
		int dataOffset = cramfsInode.getOffsetAdjusted();
		int dataOffsetStart = dataOffset;
		boolean firstAddressFound = false;
		while (!firstAddressFound) {
			byte[] possibleZlibHeader =
				provider.readBytes(dataOffset, CramFsConstants.ZLIB_MAGIC_SIZE);
			if (Arrays.equals(possibleZlibHeader, ZLIB.ZLIB_COMPRESSION_DEFAULT) ||
				Arrays.equals(possibleZlibHeader, ZLIB.ZLIB_COMPRESSION_BEST) ||
				Arrays.equals(possibleZlibHeader, ZLIB.ZLIB_COMPRESSION_NO_LOW)) {
				// Record the first block's address as the table's first entry.
				blockPointerTable.add(dataOffset);
				firstAddressFound = true;
				return ((dataOffset - dataOffsetStart) / CramFsConstants.BLOCK_POINTER_SIZE) + 1;
			}
			dataOffset += 4;
		}
		return numBlockPointers;
	}

	/**
	 * Uses the block pointer table which contains addresses
	 * to calculate the size of the compressed blocks of data
	 * for use in uncompressing each block. Adds the size of each block
	 * to the compressedBlockSizes arrayList. Correct only if the table
	 * addresses are in ascending order.
	 */
	private void calculateCompressedBlockSizes() {
		for (int i = 0; i < blockPointerTable.size() - 1; i++) {
			compressedBlockSizes.add(blockPointerTable.get(i + 1) - blockPointerTable.get(i));
		}
	}

	/**
	 * Reads one block from the data pointed to by the CramfsInode.
	 * @param dataBlockIndex the index of the block to read.
	 * @return a byte array representing the compressed data for a compressed block.
	 * @throws IOException if error occurs when reading the data block.
	 */
	public byte[] readDataBlock(int dataBlockIndex) throws IOException {
		return provider.readBytes(blockPointerTable.get(dataBlockIndex),
			compressedBlockSizes.get(dataBlockIndex));
	}

	/**
	 * Sends compressed data block to be uncompressed.
	 * @param dataBlockIndex the index of the block to read.
	 * @return Uncompressed data as a ByteArrayInputStream.
	 * @throws IOException if an error occurs when reading the decompressed data block.
	 */
	public InputStream readDataBlockDecompressed(int dataBlockIndex) throws IOException {
		Integer index = blockPointerTable.get(dataBlockIndex);
		Integer length = compressedBlockSizes.get(dataBlockIndex);
		byte[] compressedBytes = provider.readBytes(index, length);
		InputStream compressedInputStream = new ByteArrayInputStream(compressedBytes);
		ZLIB zlib = new ZLIB();
		ByteArrayOutputStream decompressedOutputStream =
			zlib.decompress(compressedInputStream, CramFsConstants.DEFAULT_BLOCK_SIZE);
		return new ByteArrayInputStream(decompressedOutputStream.toByteArray());
	}

	/**
	 * Gets the provider.
	 * @return provider.
	 */
	public ByteProvider getProvider() {
		return provider;
	}

	/**
	 * Gets the CramfsInode.
	 * @return cramfsInode.
	 */
	public CramFsInode getCramfsInode() {
		return cramfsInode;
	}

	/**
	 * Gets the block pointer table.
	 * @return the block pointer table.
	 */
	public List<Integer> getBlockPointerTable() {
		return blockPointerTable;
	}

	/**
	 * Gets the number of data blocks (one fewer than table entries, since the
	 * entries bracket the blocks).
	 * @return the number of block pointers.
	 */
	public int getNumBlockPointers() {
		return blockPointerTable.size() - 1;
	}
}

View file

@@ -0,0 +1,53 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
/**
 * Constants describing the on-disk cramfs format.
 * @see <a href="https://github.com/torvalds/linux/tree/master/fs/cramfs">/fs/cramfs</a>
 */
public final class CramFsConstants {

	/** Length in bytes of the superblock signature string. */
	public static final int HEADER_STRING_LENGTH = 16;

	/** Superblock magic number, per cramfs_fs.h. */
	public static final int MAGIC = 0x28cd3d45;

	/**
	 * Constant size of an inode in bytes in memory.
	 */
	public static final int INODE_SIZE = 12;

	/**
	 * Flag as described in cramfs_fs.h.
	 */
	public static final int CRAMFS_FLAG_EXT_BLOCK_POINTERS = 0x00000800;

	/**
	 * Documentation points to this being the default size;
	 * provide option for user if they know the block size.
	 */
	public static final int DEFAULT_BLOCK_SIZE = 4096;

	/** Size in bytes of one block pointer table entry. */
	public static final int BLOCK_POINTER_SIZE = 4;

	/** Number of bytes checked when probing for a zlib stream header. */
	public static final int ZLIB_MAGIC_SIZE = 2;

	/**
	 * Width of various bitfields in struct {@link CramFsInode}
	 */
	public static final int CRAMFS_MODE_WIDTH = 16;
	public static final int CRAMFS_UID_WIDTH = 16;
	public static final int CRAMFS_SIZE_WIDTH = 24;
	public static final int CRAMFS_GID_WIDTH = 8;
	public static final int CRAMFS_NAMELEN_WIDTH = 6;
	public static final int CRAMFS_OFFSET_WIDTH = 26;

	// Utility class: prevent instantiation.
	private CramFsConstants() {
	}
}

View file

@@ -0,0 +1,201 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import java.io.IOException;
import java.util.*;
import ghidra.app.util.bin.BinaryReader;
import ghidra.app.util.bin.ByteProvider;
import ghidra.formats.gfilesystem.*;
import ghidra.formats.gfilesystem.annotations.FileSystemInfo;
import ghidra.formats.gfilesystem.factory.GFileSystemBaseFactory;
import ghidra.util.*;
import ghidra.util.exception.CancelledException;
import ghidra.util.exception.CryptoException;
import ghidra.util.task.TaskMonitor;
@FileSystemInfo(type = "cramfs", description = "CRAMFS", factory = GFileSystemBaseFactory.class)
public class CramFsFileSystem extends GFileSystemBase {

	private boolean isLittleEndian;
	private CramFsSuper cramFsSuper;
	// Every GFile handed out by this filesystem maps back to its inode here.
	private Map<GFile, CramFsInode> fileToInodeMap = new HashMap<>();
	private List<GFile> rootListing = new ArrayList<>();
	// Lazily-built cache of non-root directory listings (filled by getListing).
	private Map<GFile, List<GFile>> directoryToChildMap = new HashMap<>();

	public CramFsFileSystem(String fileSystemName, ByteProvider provider) {
		super(fileSystemName, provider);
	}

	/**
	 * Probes the first four bytes for the cramfs magic in either byte order and
	 * records the detected endianness for open().
	 */
	@Override
	public boolean isValid(TaskMonitor monitor) throws IOException {
		byte[] bytes = provider.readBytes(0, 4);
		DataConverter[] dataConverter =
			new DataConverter[] { new LittleEndianDataConverter(), new BigEndianDataConverter() };
		for (int i = 0; i < dataConverter.length; i++) {
			if (dataConverter[i].getInt(bytes) == CramFsConstants.MAGIC) {
				isLittleEndian = !dataConverter[i].isBigEndian();
				return true;
			}
		}
		return false;
	}

	/**
	 * Parses the superblock and builds the root directory listing.
	 * @throws IOException if the image uses extended block pointers (currently
	 * unsupported) or cannot be read.
	 */
	@Override
	public void open(TaskMonitor monitor) throws IOException, CryptoException, CancelledException {
		BinaryReader reader = new BinaryReader(provider, isLittleEndian);
		cramFsSuper = new CramFsSuper(reader);
		if (cramFsSuper.isExtensionsBlockPointerFlagEnabled()) {
			throw new IOException("Extended Block Pointer flag is set, currently unsupported");
		}
		List<CramFsInode> childList = cramFsSuper.getChildList();
		// Data offsets of directory inodes seen so far; reaching an inode stored
		// AT one of those offsets means we have walked past the root's own
		// entries into a subdirectory's entries, so root enumeration stops.
		// NOTE(review): this assumes all root-level entries precede every
		// subdirectory's entries in the inode list — confirm for nested images.
		Map<Long, CramFsInode> subDirectoryMap = new HashMap<>();
		GFile parent = root;
		for (CramFsInode cramFsInode : childList) {
			monitor.checkCancelled();
			if (cramFsInode.isDirectory()) {
				subDirectoryMap.put((long) cramFsInode.getOffsetAdjusted(), cramFsInode);
			}
			if (subDirectoryMap.containsKey(cramFsInode.getAddress())) {
				break;
			}
			GFileImpl iNodeFile = GFileImpl.fromPathString(this, parent, cramFsInode.getName(),
				null, cramFsInode.isDirectory(), cramFsInode.getSize());
			fileToInodeMap.put(iNodeFile, cramFsInode);
			rootListing.add(iNodeFile);
		}
	}

	/**
	 * Small utility to search childList for specific cramFsInode by name.
	 * NOTE(review): the lookup is by name only and returns the first match, so
	 * two entries with the same name in different directories would collide.
	 * @param targetName the name of the cramFsInode we are searching for.
	 * @return the target cramFs Inode, or null if no entry has that name.
	 */
	private CramFsInode getChildInodeByName(String targetName) {
		CramFsInode target = null;
		List<CramFsInode> childList = cramFsSuper.getChildList();
		for (CramFsInode cramFsInode : childList) {
			if (cramFsInode.getName().contentEquals(targetName)) {
				target = cramFsInode;
				break;
			}
		}
		return target;
	}

	/**
	 * Returns the cached or newly computed listing for a directory; the root
	 * listing was precomputed by open().
	 */
	@Override
	public List<GFile> getListing(GFile directory) throws IOException {
		//Return listing for root directory, should be first listing returned anyway.
		//Checking for null fixed some issues as well.
		if (directory == root || directory == null) {
			return rootListing;
		}
		if (directoryToChildMap.containsKey(directory)) {
			return directoryToChildMap.get(directory);
		}
		CramFsInode parentInode = getChildInodeByName(directory.getName());
		//Store current inode and size info in a counter: a directory inode's
		//size is the total byte length of its child entries.
		int directoryLength = parentInode.getSize();
		List<GFile> directoryListing = populateChildList(directory, parentInode, directoryLength);
		directoryToChildMap.put(directory, directoryListing);
		return directoryListing;
	}

	// Collects the children of a directory: starts at the child inode stored at
	// the directory's data offset and consumes entries until the directory's
	// byte length is exhausted (each entry occupies INODE_SIZE + namelen * 4).
	private List<GFile> populateChildList(GFile directory, CramFsInode parentInode,
			int directoryLength) {
		List<CramFsInode> childList = cramFsSuper.getChildList();
		List<GFile> directoryListing = new ArrayList<>();
		int startIndex = computeStartIndex(childList, parentInode);
		for (int i = startIndex; i < childList.size(); i++) {
			if (directoryLength <= 0) {
				break;
			}
			CramFsInode entryInode = childList.get(i);
			GFileImpl iNodeFile = GFileImpl.fromPathString(this, directory, entryInode.getName(),
				null, entryInode.isDirectory(), entryInode.getSize());
			directoryListing.add(iNodeFile);
			directoryLength -= (CramFsConstants.INODE_SIZE + (entryInode.getNamelen() * 4));
			fileToInodeMap.put(iNodeFile, entryInode);
		}
		return directoryListing;
	}

	/**
	 * Used to find the first entry in a directory from the list of child inodes.
	 * @param childList the list of child inodes.
	 * @param parentInode the parent cramFsInode.
	 * @return the start index in a directory, or 0 if no inode address matches.
	 */
	private int computeStartIndex(List<CramFsInode> childList, CramFsInode parentInode) {
		int startIndex = 0;
		//Iterate through full childlist until an inode address matches the directory offset.
		for (int i = 0; i < childList.size(); i++) {
			if (childList.get(i).getAddress() == parentInode.getOffsetAdjusted()) {
				startIndex = i;
				break;
			}
		}
		return startIndex;
	}

	/**
	 * Returns a lazily-decompressing byte provider for a file's contents.
	 * Files whose stored size reached 0xffffff are rejected: the cramfs size
	 * field is 24 bits wide, so larger files were clipped at creation time and
	 * cannot be reconstructed.
	 */
	@Override
	public ByteProvider getByteProvider(GFile file, TaskMonitor monitor)
			throws IOException, CancelledException {
		CramFsInode childInode = fileToInodeMap.get(file);
		if (childInode.getSize() >= 0xffffff) {
			throw new IOException("File is larger than 16MB and was clipped, cannot open.");
		}
		ByteProvider fileBP = fsService.getDerivedByteProvider(provider.getFSRL(), file.getFSRL(),
			file.getPath(), childInode.getSize(), () -> {
				return new LazyCramFsInputStream(provider, childInode,
					cramFsSuper.isLittleEndian());
			}, monitor);
		return fileBP;
	}
}

View file

@@ -0,0 +1,86 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
import ghidra.app.util.bin.StructConverter;
import ghidra.program.model.data.*;
import ghidra.util.exception.DuplicateNameException;
/**
 * The info section of a cramfs superblock: crc, edition, block count and file
 * count, stored in that order on disk.
 */
public class CramFsInfo implements StructConverter {

	private final int crc;
	private final int edition;
	private final int blocks;
	private final int files;

	/**
	 * Reads the four info words from the reader's current position, in on-disk
	 * order: crc, edition, blocks, files.
	 * @param reader the binary reader for the cramfs info/attributes.
	 * @throws IOException if there is an error while reading the cramfs info/attributes.
	 */
	public CramFsInfo(BinaryReader reader) throws IOException {
		this.crc = reader.readNextInt();
		this.edition = reader.readNextInt();
		this.blocks = reader.readNextInt();
		this.files = reader.readNextInt();
	}

	/**
	 * Returns the crc value of the cramfs info.
	 * @return the crc value of the cramfs info.
	 */
	public int getCrc() {
		return crc;
	}

	/**
	 * Returns the edition of the cramfs info.
	 * @return the edition of the cramfs info.
	 */
	public int getEdition() {
		return edition;
	}

	/**
	 * Returns the blocks of the cramfs info.
	 * @return the blocks of the cramfs info.
	 */
	public int getBlocks() {
		return blocks;
	}

	/**
	 * Returns the files of the cramfs info.
	 * @return the files of the cramfs info.
	 */
	public int getFiles() {
		return files;
	}

	@Override
	public DataType toDataType() throws DuplicateNameException, IOException {
		Structure struct = new StructureDataType("cramfs_info", 0);
		// One DWORD per field, in the same order they appear on disk.
		for (String fieldName : new String[] { "crc", "edition", "blocks", "files" }) {
			struct.add(DWORD, fieldName, null);
		}
		return struct;
	}
}

View file

@@ -0,0 +1,197 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
import ghidra.app.util.bin.StructConverter;
import ghidra.program.model.data.*;
import ghidra.util.exception.DuplicateNameException;
/**
 * One cramfs inode. On disk (cramfs_fs.h) an inode is three packed 32-bit
 * little-endian words followed by the name, zero-padded to a multiple of 4:
 * <pre>
 *   word 0: mode:CRAMFS_MODE_WIDTH       | uid:CRAMFS_UID_WIDTH
 *   word 1: size:CRAMFS_SIZE_WIDTH       | gid:CRAMFS_GID_WIDTH
 *   word 2: namelen:CRAMFS_NAMELEN_WIDTH | offset:CRAMFS_OFFSET_WIDTH
 * </pre>
 */
public class CramFsInode implements StructConverter {

	private int mode;
	private int uid;
	private int size; // 24-bit field
	private int gid; // 8-bit field
	private int namelen; // name length in 4-byte units
	private int offset; // 26-bit field, in 4-byte units
	private String name; // Not explicitly in cramfs_inode; read from the trailing bytes
	private long address; // absolute address in file, used for directory traversal.

	/**
	 * Reads one inode at the reader's current position.
	 * @param reader source positioned at the start of the inode.
	 * @throws IOException if reading from the underlying provider fails.
	 */
	public CramFsInode(BinaryReader reader) throws IOException {
		//Before reader reads anything and progresses, get addr for start of inode.
		address = reader.getPointerIndex();
		int modeUID = reader.readNextInt();
		int sizeGID = reader.readNextInt();
		int namelenOffset = reader.readNextInt();
		// The packed words are interpreted as little endian; normalize when the
		// reader delivered them big endian so the extraction below is uniform.
		if (reader.isBigEndian()) {
			modeUID = Integer.reverseBytes(modeUID);
			sizeGID = Integer.reverseBytes(sizeGID);
			namelenOffset = Integer.reverseBytes(namelenOffset);
		}
		//Always read value as little endian
		uid = ((modeUID & 0xffff0000) >> CramFsConstants.CRAMFS_UID_WIDTH) & 0x0000ffff;
		mode = (modeUID & 0x0000ffff);
		gid = ((sizeGID & 0xff000000) >> CramFsConstants.CRAMFS_SIZE_WIDTH) & 0x000000ff;
		size = (sizeGID & 0x00ffffff);
		// offset occupies the upper 26 bits of word 2. Mask with 0x03ffffff
		// (26 set bits) after the shift; the former mask 0x0cffffff dropped
		// offset bits 24-25 and retained two sign-extension bits, corrupting
		// offsets in large images.
		offset =
			((namelenOffset & 0xffffffc0) >>> CramFsConstants.CRAMFS_NAMELEN_WIDTH) & 0x03ffffff;
		namelen = (namelenOffset & 0x0000003f);
		// namelen counts 4-byte units; names are zero-padded to that length.
		name = reader.readNextAsciiString(namelen * 4);
	}

	@Override
	public DataType toDataType() throws DuplicateNameException, IOException {
		int length = namelen * 4;
		// Structure name encodes the padded name length so each layout variant
		// gets its own data type.
		Structure struct = new StructureDataType("cramfs_inode_" + length, 0);
		struct.add(DWORD, "modeUID", null);
		struct.add(DWORD, "sizeGID", null);
		struct.add(DWORD, "namelenOffset", null);
		if (namelen > 0) {
			struct.add(STRING, length, "name", null);
		}
		return struct;
	}

	@Override
	public String toString() {
		StringBuilder buffer = new StringBuilder();
		buffer.append("mode = 0x" + Integer.toHexString(mode) + " 16 MSB, UID = 0x" +
			Integer.toHexString(uid) + " 16 LSB\n");
		buffer.append("size = 0x" + Integer.toHexString(size) + " 24 MSB, GID = 0x" +
			Integer.toHexString(gid) + " 8 LSB\n");
		buffer.append("namelen = 0x" + Integer.toHexString(namelen) + " 6 MSB, offset = 0x" +
			Integer.toHexString(offset) + " 26 LSB\n");
		if (isFile()) {
			buffer.append("Pointer to data = 0x" + Integer.toHexString(getOffsetAdjusted()) + "\n");
		}
		if (isDirectory()) {
			if (offset == 0) {
				buffer.append("EMPTY DIRECTORY\n");
			}
			else {
				buffer.append(
					"Pointer to next inode = 0x" + Integer.toHexString(getOffsetAdjusted()) + "\n");
			}
		}
		return buffer.toString();
	}

	/**
	 * Returns the mode of the CramFSInode.
	 * @return the mode.
	 */
	public int getMode() {
		return mode;
	}

	/**
	 * Returns the unique identifier of the inode.
	 * @return the unique identifier.
	 */
	public int getUid() {
		return uid;
	}

	/**
	 * Returns the size of the inode.
	 * @return the size.
	 */
	public int getSize() {
		return size;
	}

	/**
	 * Returns the group identifier of the inode.
	 * @return the group identifier.
	 */
	public int getGid() {
		return gid;
	}

	/**
	 * Returns the name length of the inode, in 4-byte units.
	 * @return the name length.
	 */
	public int getNamelen() {
		return namelen;
	}

	/**
	 * Returns the raw (unscaled) offset of the inode.
	 * @return the offset.
	 */
	public int getOffset() {
		return offset;
	}

	/**
	 * Returns the name of the inode.
	 * @return the name.
	 */
	public String getName() {
		return name;
	}

	/**
	 * Returns the offset scaled to a byte address (the field stores 4-byte units).
	 * @return the adjusted offset of the inode.
	 */
	public int getOffsetAdjusted() {
		return offset * 4;
	}

	/**
	 * Returns true if the inode is a file.
	 * NOTE(review): this only tests one mode bit; other mode values that share
	 * bit 0x8000 (e.g. symlink/socket type codes) would also report true —
	 * confirm whether a full (mode &amp; S_IFMT) comparison was intended.
	 * @return true if the inode is a file.
	 */
	public boolean isFile() {
		return ((mode & 0x8000) != 0);
	}

	/**
	 * Returns true if the inode is a directory.
	 * NOTE(review): single-bit test; mode values sharing bit 0x4000 would also
	 * report true — see note on {@link #isFile()}.
	 * @return true if the inode is a directory.
	 */
	public boolean isDirectory() {
		return ((mode & 0x4000) != 0);
	}

	/**
	 * Returns the absolute address of this inode within the file.
	 * @return the address of the inode.
	 */
	public long getAddress() {
		return address;
	}
}

View file

@@ -0,0 +1,127 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import java.io.*;
import java.util.*;
import org.apache.commons.lang3.ArrayUtils;
import ghidra.app.util.bin.ByteProvider;
import ghidra.file.formats.zlib.ZLIB;
public class CramFsInputStream extends InputStream {
private CramFsInode iNode;
private List<Integer> blockPointerList = new ArrayList<>();
private List<CramFsBlock> blockList;
private List<ByteArrayOutputStream> decompressedBlockStreams = new ArrayList<>();
private List<Byte> decompressedOutputList = new ArrayList<>();
private ZLIB zlib = new ZLIB();
private ByteProvider byteProvider;
private CramFsBlockReader cramfsBlockReader;
private int defaultBlockSize;
private int currentByte = 0;
private boolean blockExtensionEnabled;
/**
 * Constructor for cramfs input stream. Eagerly decompresses every data block
 * of the inode and buffers the bytes for read().
 * @param byteProvider the underlying byte provider for the input stream.
 * @param iNode the parent node for the input stream.
 * @param blockExtensionEnabled the enabled block extensions for the input stream.
 * @throws IOException if there is an error when creating the input stream.
 */
// NOTE(review): decompressAllBlocks() dereferences cramfsBlockReader, which is
// never assigned anywhere visible in this class — as written this constructor
// would throw NullPointerException. Confirm whether the reader was meant to be
// constructed here.
public CramFsInputStream(ByteProvider byteProvider, CramFsInode iNode,
		boolean blockExtensionEnabled) throws IOException {
	this.iNode = iNode;
	this.byteProvider = byteProvider;
	this.blockExtensionEnabled = blockExtensionEnabled;
	defaultBlockSize = CramFsConstants.DEFAULT_BLOCK_SIZE;
	blockList = getDataBlocks();
	decompressAllBlocks();
	combineDecompressedBlockStreams();
}
/**
 * Sends the inode to the CramFs block factory and gets back a list of
 * CramFs blocks for the data associated with the inode.
 * @return a list of cramFs blocks to be used for decompression.
 */
private List<CramFsBlock> getDataBlocks() {
	return new CramFsBlockFactory(iNode, byteProvider, blockPointerList, blockExtensionEnabled)
			.produceBlocks();
}
/**
* Gets the Cram file system block list.
* @return the block list
*/
public List<CramFsBlock> getBlockList() {
return blockList;
}
/**
* Decompress all the data blocks that an inode points to.
* Adds the uncompressed blocks to an internal list for later processing.
* @throws IOException if there is an error when decompressing the data blocks.
*/
private void decompressAllBlocks() throws IOException {
for (int i = 0; i < cramfsBlockReader.getNumBlockPointers() - 1; i++) {
InputStream compressedIn = new ByteArrayInputStream(cramfsBlockReader.readDataBlock(i));
decompressedBlockStreams
.add(zlib.decompress(compressedIn, CramFsConstants.DEFAULT_BLOCK_SIZE));
}
}
/**
* Combines all the ZLIB decompressed block stream bytes into one list.
*/
private void combineDecompressedBlockStreams() {
for (int i = 0; i < decompressedBlockStreams.size(); i++) {
byte[] bytes = decompressedBlockStreams.get(i).toByteArray();
List<Byte> bytesList = Arrays.asList(ArrayUtils.toObject(bytes));
decompressedOutputList.addAll(bytesList);
}
}
/**
* Decompress the specified block.
* @param blockIndex the block to decompress.
* @return decompressed output stream.
* @throws IOException if zlib decompress fails.
*/
public ByteArrayOutputStream decompressBlock(int blockIndex) throws IOException {
InputStream compressedIn = new ByteArrayInputStream(blockList.get(blockIndex).readBlock());
return zlib.decompress(compressedIn, defaultBlockSize);
}
/**
* Reads one byte from the internal uncompressed output list of Bytes.
* @return The byte value from the internal list at the current read position.
* @throws IOException if there is an error while reading.
*/
@Override
public int read() throws IOException {
if (currentByte < decompressedOutputList.size()) {
byte readByte = decompressedOutputList.get(currentByte).byteValue();
currentByte++;
return Byte.toUnsignedInt(readByte);
}
return -1;
}
}

View file

@ -0,0 +1,164 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import ghidra.app.util.bin.BinaryReader;
import ghidra.app.util.bin.StructConverter;
import ghidra.program.model.data.*;
import ghidra.util.exception.DuplicateNameException;
public class CramFsSuper implements StructConverter {
	private int magic;
	private int size;
	private int flags;
	private int future;
	private boolean isLE;
	private String signature;
	private CramFsInfo fsid;
	private String name;
	private CramFsInode root;
	private List<CramFsInode> childList = new ArrayList<>();

	/**
	 * Constructs a cramfs super block by reading its fields, the root inode,
	 * and the remaining child inodes sequentially from the reader.
	 * @param reader binary reader positioned at the start of the super block.
	 * @throws IOException if there is an error when reading the super block.
	 */
	public CramFsSuper(BinaryReader reader) throws IOException {
		// Field order mirrors the on-disk layout; do not reorder these reads.
		magic = reader.readNextInt();
		size = reader.readNextInt();
		flags = reader.readNextInt();
		future = reader.readNextInt();
		signature = reader.readNextAsciiString(CramFsConstants.HEADER_STRING_LENGTH);
		fsid = new CramFsInfo(reader);
		name = reader.readNextAsciiString(CramFsConstants.HEADER_STRING_LENGTH);
		root = new CramFsInode(reader);
		isLE = reader.isLittleEndian();
		// The root inode is already consumed above, so read one fewer
		// inode than the total file count reported by fsid.
		int fileCount = fsid.getFiles();
		for (int i = 1; i < fileCount; i++) {
			childList.add(new CramFsInode(reader));
		}
	}

	/**
	 * Checks whether the CRAMFS_FLAG_EXT_BLOCK_POINTERS flag is set.
	 * @return true if the extended block pointers flag is set, false otherwise.
	 */
	public boolean isExtensionsBlockPointerFlagEnabled() {
		int mask = CramFsConstants.CRAMFS_FLAG_EXT_BLOCK_POINTERS;
		return (flags & mask) == mask;
	}

	/**
	 * Returns the magic number.
	 * @return the magic number
	 */
	public int getMagic() {
		return magic;
	}

	/**
	 * Returns the size of the super block.
	 * @return the size of the super block
	 */
	public int getSize() {
		return size;
	}

	/**
	 * Returns the super block flags.
	 * @return the super block flags.
	 */
	public int getFlags() {
		return flags;
	}

	/**
	 * Returns the future field.
	 * @return the future field.
	 */
	public int getFuture() {
		return future;
	}

	/**
	 * Reports the endianness of the super block.
	 * @return true if the super block is little endian, false otherwise.
	 */
	public boolean isLittleEndian() {
		return isLE;
	}

	/**
	 * Returns the super block signature.
	 * @return the super block signature.
	 */
	public String getSignature() {
		return signature;
	}

	/**
	 * Returns the file system identifier.
	 * @return the file system identifier.
	 */
	public CramFsInfo getFsid() {
		return fsid;
	}

	/**
	 * Returns the name of the super block.
	 * @return the name of the super block.
	 */
	public String getName() {
		return name;
	}

	/**
	 * Returns the root inode of the super block.
	 * @return the root inode of the super block.
	 */
	public CramFsInode getRoot() {
		return root;
	}

	/**
	 * Returns the child inode list of the super block.
	 * @return the child inode list of the super block.
	 */
	public List<CramFsInode> getChildList() {
		return childList;
	}

	@Override
	public DataType toDataType() throws DuplicateNameException, IOException {
		// Child inodes are intentionally excluded; this structure describes
		// only the fixed-size super block header.
		Structure structure = new StructureDataType("cramfs_super", 0);
		structure.add(DWORD, "magic", null);
		structure.add(DWORD, "size", null);
		structure.add(DWORD, "flags", null);
		structure.add(DWORD, "future", null);
		structure.add(STRING, CramFsConstants.HEADER_STRING_LENGTH, "signature", null);
		structure.add(fsid.toDataType(), "fsid", null);
		structure.add(STRING, CramFsConstants.HEADER_STRING_LENGTH, "name", null);
		structure.add(root.toDataType(), "root", null);
		return structure;
	}
}

View file

@ -0,0 +1,54 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.cramfs;
import java.io.*;
import ghidra.app.util.bin.ByteProvider;
public class LazyCramFsInputStream extends InputStream {
	private CramFsBlockReader cramFsBlockReader;
	// Starts as an empty stream so the first read() immediately advances to block 0.
	private InputStream currentDecompressedBlockInputStream = new ByteArrayInputStream(new byte[0]);
	private int currentCompressedBlockIndex;

	/**
	 * Constructor for lazy cramfs input stream. Blocks are decompressed
	 * one at a time, on demand, as the stream is read.
	 * @param provider byte provider for the input stream.
	 * @param cramfsInode the parent inode for the input stream.
	 * @param isLittleEndian true if the input stream is little endian.
	 * @throws IOException if there is an error when creating the input stream.
	 */
	public LazyCramFsInputStream(ByteProvider provider, CramFsInode cramfsInode,
			boolean isLittleEndian) throws IOException {
		cramFsBlockReader = new CramFsBlockReader(provider, cramfsInode, isLittleEndian);
	}

	/**
	 * Reads the next byte, lazily decompressing the next data block whenever
	 * the current block's decompressed stream is exhausted.
	 * @return the next byte (0-255), or -1 at end of stream.
	 * @throws IOException if decompressing or reading a block fails.
	 */
	@Override
	public int read() throws IOException {
		int byteRead = currentDecompressedBlockInputStream.read();
		// FIX: loop (instead of a single 'if') so that an empty decompressed
		// block does not prematurely report end-of-stream while later blocks
		// still hold data.
		while (byteRead == -1 &&
			currentCompressedBlockIndex < cramFsBlockReader.getNumBlockPointers()) {
			currentDecompressedBlockInputStream =
				cramFsBlockReader.readDataBlockDecompressed(currentCompressedBlockIndex);
			currentCompressedBlockIndex++;
			byteRead = currentDecompressedBlockInputStream.read();
		}
		return byteRead;
	}
}