Merge branch 'master' of ssh://vamos.informatik.uni-erlangen.de:29418/fail

Björn Döbel
2013-09-10 11:54:29 +02:00
37 changed files with 451 additions and 81 deletions

View File

@ -8,9 +8,9 @@ namespace fail {
void CoroutineManager::m_invoke(void* pData)
{
//std::cerr << "CORO m_invoke " << co_current() << std::endl;
// TODO: Log-Level?
reinterpret_cast<ExperimentFlow*>(pData)->coroutine_entry();
ExperimentFlow *flow = reinterpret_cast<ExperimentFlow*>(pData);
flow->coroutine_entry();
simulator.removeFlow(flow);
//m_togglerstack.pop();
// FIXME: need to pop our caller
co_exit(); // deletes the associated coroutine memory as well
@ -20,7 +20,12 @@ void CoroutineManager::m_invoke(void* pData)
while (1); // freeze.
}
CoroutineManager::~CoroutineManager() { }
CoroutineManager::~CoroutineManager()
{
// Note that we do not destroy the associated coroutines; doing so would
// cause problems when shutting down.
m_Flows.clear();
}
void CoroutineManager::toggle(ExperimentFlow* flow)
{
@ -53,7 +58,9 @@ void CoroutineManager::remove(ExperimentFlow* flow)
// find coroutine handle for this flow
flowmap_t::iterator it = m_Flows.find(flow);
if (it == m_Flows.end()) {
assert(false && "FATAL ERROR: Cannot remove flow");
// Not finding the flow to remove is not an error; especially when
// shutting down this is the common case, as ~CoroutineManager probably
// clears the flow list before the ExperimentFlow destructors run.
return;
}
corohandle_t coro = it->second;
@ -67,7 +74,9 @@ void CoroutineManager::remove(ExperimentFlow* flow)
// delete coroutine (and handle the special case we're removing
// ourselves)
if (coro == co_current()) {
co_exit();
if (!m_Terminated) {
co_exit();
}
} else {
co_delete(coro);
}

View File

@ -29,10 +29,12 @@ private:
std::stack<corohandle_t> m_togglerstack;
//! manages the run-calls for each ExperimentFlow-object
static void m_invoke(void* pData);
//! \c true if terminated explicitly using simulator.terminate()
bool m_Terminated;
public:
static const ExperimentFlow* SIM_FLOW; //!< the simulator coroutine flow
CoroutineManager() : m_simCoro(co_current()) { }
CoroutineManager() : m_simCoro(co_current()), m_Terminated(false) { }
~CoroutineManager();
/**
* Creates a new coroutine for the specified experiment flow.
@ -63,6 +65,12 @@ public:
* @return the current experiment flow.
*/
ExperimentFlow* getCurrent();
/**
* Sets the termination flag. This should be called when Fail
* exits due to a call to \c ::exit() (used, e.g., in
* \c SimulatorController::terminate()). This cannot be undone.
*/
void setTerminated() { m_Terminated = true; }
};
} // end-of-namespace: fail
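
A minimal, self-contained sketch of how the new m_Terminated flag is meant to be used; the FlowManagerModel class and the co_exit stub below are simplified stand-ins for CoroutineManager and libcoroutine, not the actual FAIL* code:

#include <iostream>

// Stand-in for the libcoroutine call used by CoroutineManager::remove().
static void co_exit_stub() { std::cout << "co_exit()" << std::endl; }

struct FlowManagerModel {
	bool m_Terminated = false;   // mirrors CoroutineManager::m_Terminated

	void setTerminated() { m_Terminated = true; }

	// Models the "removing ourselves" branch of CoroutineManager::remove():
	// once the termination flag is set, co_exit() must be skipped, because
	// the process is already tearing down via ::exit().
	void removeCurrent()
	{
		if (!m_Terminated) {
			co_exit_stub();  // normal case: leave the current coroutine
		}
	}
};

int main()
{
	FlowManagerModel flows;
	flows.removeCurrent();  // prints "co_exit()"
	flows.setTerminated();  // what SimulatorController::terminate() does before exit()
	flows.removeCurrent();  // no co_exit() anymore: shutdown path
}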

View File

@ -13,6 +13,11 @@ namespace fail {
class ExperimentFlow {
public:
ExperimentFlow() { }
virtual ~ExperimentFlow()
{
simulator.clearListeners(this); // remove residual events
simulator.removeFlow(this);
}
/**
* Defines the experiment flow.
* @return \c true if the experiment was successful, \c false otherwise

View File

@ -217,6 +217,8 @@ void SimulatorController::terminate(int exCode)
// Attention: This could cause problems, e.g., because of non-closed sockets
std::cout << "[FAIL] Exit called by experiment with exit code: " << exCode << std::endl;
// TODO: (Non-)Verbose-Mode? Log-Level?
m_Flows.setTerminated(); // we are about to terminate
exit(exCode);
}

View File

@ -48,6 +48,10 @@ if(${LIB_IBERTY} STREQUAL LIB_IBERTY-NOTFOUND)
message(FATAL_ERROR "libiberty not found. Try installing binutils-dev: [ sudo aptitude install binutils-dev ]")
endif()
# libz required by gzstream
find_package(ZLIB REQUIRED)
include_directories(${ZLIB_INCLUDE_DIRS})
# objdump required by Disassembler.cc
set(THE_OBJDUMP "${ARCH_TOOL_PREFIX}objdump")
@ -61,7 +65,7 @@ mark_as_advanced(FAIL_OBJDUMP)
add_library(fail-util ${SRCS})
add_dependencies(fail-util fail-comm)
target_link_libraries(fail-util ${PROTOBUF_LIBRARY} ${Boost_LIBRARIES} ${LIB_IBERTY} )
target_link_libraries(fail-util ${PROTOBUF_LIBRARY} ${Boost_LIBRARIES} ${LIB_IBERTY} ${ZLIB_LIBRARIES})
option(BUILD_LLVM_DISASSEMBLER "Build the LLVM-based disassembler (LLVM 3.3 preferred, for 3.1 and 3.2 read doc/how-to-build.txt)" OFF)
if (BUILD_LLVM_DISASSEMBLER)

View File

@ -1,4 +1,3 @@
#ifndef __puma
#include "LLVMDisassembler.hpp"
using namespace fail;
@ -147,5 +146,3 @@ void LLVMDisassembler::disassemble()
}
void LLVMDisassembler::StringRefMemoryObject::anchor() {}
#endif

View File

@ -1,8 +1,6 @@
#ifndef __LLVMDISASSEMBLER_HPP__
#define __LLVMDISASSEMBLER_HPP__
#ifndef __puma
#include <iostream>
#include <vector>
#include <map>
@ -137,5 +135,4 @@ public:
}
#endif // puma
#endif // __LLVMDISASSEMBLER_HPP__

View File

@ -4,7 +4,6 @@
using namespace fail;
LLVMtoFailBochs::LLVMtoFailBochs() {
#ifndef __puma
/* These magic numbers are taken from the llvm compiler (MC), they
do not appear in any header. They hopefully will never
change */
@ -19,12 +18,12 @@ LLVMtoFailBochs::LLVMtoFailBochs() {
llvm_to_fail_map[45] = reginfo_t(RID_CBX, 32, 0); // EBX
llvm_to_fail_map[9] = reginfo_t(RID_CCX, 8, 8); // CH
llvm_to_fail_map[10] = reginfo_t(RID_CCX, 0xff); // CL
llvm_to_fail_map[10] = reginfo_t(RID_CCX, 8, 0); // CL
llvm_to_fail_map[28] = reginfo_t(RID_CCX, 16, 0); // CX
llvm_to_fail_map[46] = reginfo_t(RID_CCX); // ECX
llvm_to_fail_map[29] = reginfo_t(RID_CDX, 8, 8); // DH
llvm_to_fail_map[32] = reginfo_t(RID_CDX, 0xff); // DL
llvm_to_fail_map[32] = reginfo_t(RID_CDX, 8, 0); // DL
llvm_to_fail_map[42] = reginfo_t(RID_CDX, 16, 0); // DX
llvm_to_fail_map[48] = reginfo_t(RID_CDX); // EDX
@ -46,5 +45,4 @@ LLVMtoFailBochs::LLVMtoFailBochs() {
llvm_to_fail_map[54] = reginfo_t(RID_CSP); // ESP
llvm_to_fail_map[117] = reginfo_t(RID_CSP, 16, 0); // SP
llvm_to_fail_map[118] = reginfo_t(RID_CSP, 8, 0); // SPL
#endif
}

View File

@ -4,7 +4,6 @@
using namespace fail;
LLVMtoFailGem5::LLVMtoFailGem5() {
#ifndef __puma
/* These magic numbers are taken from the machine descriptions of
LLVM; they (hopefully) will not change, since they are not exported
via a header */
@ -24,5 +23,4 @@ LLVMtoFailGem5::LLVMtoFailGem5() {
llvm_to_fail_map[105] = reginfo_t(RI_SP);
llvm_to_fail_map[40] = reginfo_t(RI_LR);
llvm_to_fail_map[43] = reginfo_t(RI_IP);
#endif
}

View File

@ -4,16 +4,14 @@
using namespace fail;
const LLVMtoFailTranslator::reginfo_t & LLVMtoFailTranslator::getFailRegisterID(unsigned int regid) {
#ifndef __puma
ltof_map_t::iterator it = llvm_to_fail_map.find(regid);
if( it != llvm_to_fail_map.end() ) {// found
return (*it).second;
} else { // not found
std::cout << "Fail ID for LLVM Register id " << regid << " not found :(" << std::endl;
std::cout << "Fail ID for LLVM Register id " << std::dec << regid << " not found :(" << std::endl;
//exit(EXIT_FAILURE);
return notfound;
}
#endif
}
regdata_t LLVMtoFailTranslator::getRegisterContent(ConcreteCPU& cpu, const reginfo_t &reginfo){

View File

@ -13,6 +13,12 @@ namespace fail {
*/
class LLVMtoFailTranslator {
public:
/**
* Maps registers to/from linear addresses usable for def/use-pruning
* purposes and storage in the database. Takes care that the linear
* addresses of x86 subregisters (e.g., AX represents the lower 16 bits of
* EAX) overlap with their siblings.
*/
struct reginfo_t {
int id;
regwidth_t width;
@ -20,18 +26,17 @@ public:
byte_t offset;
int toDataAddress() const {
// .. 5 4 | 7 6 5 4 | 3 2 1 0
// <reg> | <width> | <offset>
return (id << 8) | ((width/8) << 4) | (offset / 8);
// .. 5 4 | 3 2 1 0
// <reg> | <offset>
return (id << 4) | (offset / 8);
}
// does not recreate width or mask
static reginfo_t fromDataAddress(int addr) {
int id = addr >> 8;
regwidth_t width = ((addr >> 4) & 0xf) * 8;
byte_t offset = (addr & 0xf) * 8;
return reginfo_t(id, width, offset);
int id = addr >> 4;
byte_t offset = (addr & 0xf) * 8;
return reginfo_t(id, 0, offset);
}
reginfo_t(int id=-1, regwidth_t width = 32, byte_t offs = 0)
: id(id), width(width), mask((regwidth_t)((((long long)1 << width) - 1) << offs)), offset(offs) {};
};
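
To make the overlap property from the comment above concrete, here is a short stand-alone sketch of the new toDataAddress() encoding; the register id 2 is a made-up example value, not an actual RID_* constant:

#include <cassert>

// Copy of the new encoding: .. 5 4 | 3 2 1 0  ==  <reg> | <offset>
static int toDataAddress(int id, int offsetBits)
{
	return (id << 4) | (offsetBits / 8);
}

int main()
{
	const int id = 2;  // hypothetical id for the A register family (EAX/AX/AH/AL)
	int eax = toDataAddress(id, 0);  // EAX: offset 0
	int ax  = toDataAddress(id, 0);  // AX:  offset 0 -> same linear address as EAX
	int al  = toDataAddress(id, 0);  // AL:  offset 0 -> same linear address again
	int ah  = toDataAddress(id, 8);  // AH:  offset 8 -> one byte above AL

	assert(eax == ax && ax == al);   // subregisters overlap with their siblings
	assert(ah == al + 1);
	// The width is no longer part of the address, which is why
	// fromDataAddress() cannot recreate it and returns width 0.
	return 0;
}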
@ -39,13 +44,16 @@ protected:
LLVMtoFailTranslator(){};
#ifndef __puma
typedef std::map<unsigned int, struct reginfo_t> ltof_map_t;
ltof_map_t llvm_to_fail_map;
#endif
public:
/**
* Translates a backend-specific register ID to a Fail register ID.
* @param regid A backend-specific register ID.
* @return A Fail* register ID, or LLVMtoFailTranslator::notfound if no
* mapping was found.
*/
const reginfo_t & getFailRegisterID(unsigned int regid);
regdata_t getRegisterContent(ConcreteCPU & cpu, const reginfo_t & reg);
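
A small self-contained model of the lookup-or-sentinel behavior that getFailRegisterID() documents above; reginfo_t is reduced to its id field and the mapping entry is a made-up example, not a real table entry:

#include <iostream>
#include <map>

struct reginfo_t { int id = -1; };  // id == -1 plays the role of "notfound"

struct TranslatorModel {
	std::map<unsigned int, reginfo_t> llvm_to_fail_map;
	reginfo_t notfound;  // default-constructed: id == -1

	const reginfo_t & getFailRegisterID(unsigned int regid) const
	{
		std::map<unsigned int, reginfo_t>::const_iterator it = llvm_to_fail_map.find(regid);
		return it != llvm_to_fail_map.end() ? it->second : notfound;
	}
};

int main()
{
	TranslatorModel t;
	t.llvm_to_fail_map[46].id = 2;  // hypothetical Fail id for LLVM register 46 (ECX)
	std::cout << t.getFailRegisterID(46).id << std::endl;    // 2
	std::cout << t.getFailRegisterID(9999).id << std::endl;  // -1: caller must handle "no mapping"
}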
@ -58,7 +66,7 @@ public:
}
int getFailRegisterId(unsigned int regid) { return this->getFailRegisterID(regid).id; };
private:
reginfo_t notfound;
};

View File

@ -21,6 +21,9 @@ find_package(Protobuf REQUIRED)
include_directories(${PROTOBUF_INCLUDE_DIRS})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
find_package(MySQL REQUIRED)
include_directories(${MYSQL_INCLUDE_DIR})
PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${MY_PROTOS})
## Build library
@ -30,6 +33,6 @@ target_link_libraries(fail-${EXPERIMENT_NAME} ${PROTOBUF_LIBRARY})
## This is the example's campaign server distributing experiment parameters
add_executable(${EXPERIMENT_NAME}-server main.cc)
target_link_libraries(${EXPERIMENT_NAME}-server -Wl,--start-group fail-${EXPERIMENT_NAME} fail-sal fail-util fail-cpn fail-comm ${PROTOBUF_LIBRARY} ${Boost_THREAD_LIBRARY} -lmysqlclient -Wl,--end-group)
target_link_libraries(${EXPERIMENT_NAME}-server -Wl,--start-group fail-${EXPERIMENT_NAME} fail-sal fail-util fail-cpn fail-comm ${PROTOBUF_LIBRARY} ${Boost_THREAD_LIBRARY} ${MYSQL_LIBRARIES} -Wl,--end-group)
install(TARGETS ${EXPERIMENT_NAME}-server RUNTIME DESTINATION bin)

View File

@ -22,14 +22,17 @@ find_package(Protobuf REQUIRED)
include_directories(${PROTOBUF_INCLUDE_DIRS})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
find_package(MySQL REQUIRED)
include_directories(${MYSQL_INCLUDE_DIR})
PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${MY_PROTOS})
## Build library
add_library(fail-${EXPERIMENT_NAME} ${PROTO_SRCS} ${PROTO_HDRS} ${MY_CAMPAIGN_SRCS})
add_dependencies(fail-${EXPERIMENT_NAME} fail-tracing fail-comm)
target_link_libraries(fail-${EXPERIMENT_NAME} ${PROTOBUF_LIBRARY} -lmysqlclient_r)
target_link_libraries(fail-${EXPERIMENT_NAME} ${PROTOBUF_LIBRARY} ${MYSQL_LIBRARIES})
## This is the example's campaign server distributing experiment parameters
add_executable(${EXPERIMENT_NAME}-server main.cc)
target_link_libraries(${EXPERIMENT_NAME}-server fail-${EXPERIMENT_NAME} fail ${PROTOBUF_LIBRARY} ${Boost_THREAD_LIBRARY} -lmysqlclient_r)
target_link_libraries(${EXPERIMENT_NAME}-server fail-${EXPERIMENT_NAME} fail ${PROTOBUF_LIBRARY} ${Boost_THREAD_LIBRARY} ${MYSQL_LIBRARIES})
install(TARGETS ${EXPERIMENT_NAME}-server RUNTIME DESTINATION bin)

View File

@ -1,8 +1,7 @@
#include "experiment.hpp"
#include "sal/SALInst.hpp"
static EcosKernelTestExperiment experiment;
void instantiateEcosKernelTestExperiment()
{
fail::simulator.addFlow(&experiment);
fail::simulator.addFlow(new EcosKernelTestExperiment);
}

View File

@ -12,6 +12,10 @@
// You need to provide the implementation of this function in your experiment
// directory:
void instantiate@EXPERIMENT_TYPE@();
// The experiment needs to be instantiated dynamically (on the stack, or the
// heap), as the ExperimentFlow destructor deregisters from the
// CoroutineManager which may not exist anymore if the global
// construction/destruction order is inappropriate.
aspect @EXPERIMENT_TYPE@ExperimentHook {
advice execution ("void fail::SimulatorController::initExperiments()") : after () {

View File

@ -9,10 +9,14 @@
#include "../experiments/@EXPERIMENT_NAME@/experiment.hpp"
#include "sal/SALInst.hpp"
// The experiment needs to be instantiated dynamically (on the stack, or the
// heap), as the ExperimentFlow destructor deregisters from the
// CoroutineManager which may not exist anymore if the global
// construction/destruction order is inappropriate.
aspect @EXPERIMENT_TYPE@ExperimentHook {
@EXPERIMENT_TYPE@ experiment;
advice execution ("void fail::SimulatorController::initExperiments()") : after () {
fail::simulator.addFlow(&experiment);
fail::simulator.addFlow(new @EXPERIMENT_TYPE@);
}
};

View File

@ -21,6 +21,9 @@ find_package(Protobuf REQUIRED)
include_directories(${PROTOBUF_INCLUDE_DIRS})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
find_package(MySQL REQUIRED)
include_directories(${MYSQL_INCLUDE_DIR})
PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${MY_PROTOS})
## Build library
@ -30,5 +33,5 @@ target_link_libraries(fail-${EXPERIMENT_NAME} ${PROTOBUF_LIBRARY})
## This is the example's campaign server distributing experiment parameters
add_executable(${EXPERIMENT_NAME}-server main.cc)
target_link_libraries(${EXPERIMENT_NAME}-server -Wl,--start-group fail-${EXPERIMENT_NAME} fail-sal fail-util fail-cpn fail-comm ${PROTOBUF_LIBRARY} ${Boost_THREAD_LIBRARY} -lmysqlclient -Wl,--end-group)
target_link_libraries(${EXPERIMENT_NAME}-server -Wl,--start-group fail-${EXPERIMENT_NAME} fail-sal fail-util fail-cpn fail-comm ${PROTOBUF_LIBRARY} ${Boost_THREAD_LIBRARY} ${MYSQL_LIBRARIES} -Wl,--end-group)
install(TARGETS ${EXPERIMENT_NAME}-server RUNTIME DESTINATION bin)

View File

@ -1,8 +1,7 @@
#include "experiment.hpp"
#include "sal/SALInst.hpp"
static NanoJPEGExperiment experiment;
void instantiateNanoJPEGExperiment()
{
fail::simulator.addFlow(&experiment);
fail::simulator.addFlow(new NanoJPEGExperiment);
}

View File

@ -1,8 +1,7 @@
#include "experiment.hpp"
#include "sal/SALInst.hpp"
static RAMpageExperiment experiment;
void instantiateRAMpageExperiment()
{
fail::simulator.addFlow(&experiment);
fail::simulator.addFlow(new RAMpageExperiment);
}