Adding gem5 source to svn.
git-svn-id: https://www4.informatik.uni-erlangen.de/i4svn/danceos/trunk/devel/fail@1819 8c4709b5-6ec9-48aa-a5cd-a96041d1645a
This commit is contained in:
58
simulators/gem5/src/mem/ruby/system/AbstractMemOrCache.hh
Normal file
58
simulators/gem5/src/mem/ruby/system/AbstractMemOrCache.hh
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright (c) 2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_ABSTRACTMEMORCACHE_HH__
|
||||
#define __MEM_RUBY_SYSTEM_ABSTRACTMEMORCACHE_HH__
|
||||
|
||||
#include <iosfwd>
|
||||
|
||||
#include "mem/ruby/slicc_interface/Message.hh"
|
||||
|
||||
class Consumer;
|
||||
class MemoryNode;
|
||||
class Message;
|
||||
|
||||
class AbstractMemOrCache
|
||||
{
|
||||
public:
|
||||
virtual ~AbstractMemOrCache() {};
|
||||
virtual void setConsumer(Consumer* consumer_ptr) = 0;
|
||||
virtual Consumer* getConsumer() = 0;
|
||||
|
||||
virtual void enqueue (const MsgPtr& message, int latency) = 0;
|
||||
virtual void enqueueMemRef (MemoryNode& memRef) = 0;
|
||||
virtual void dequeue () = 0;
|
||||
virtual const Message* peek () = 0;
|
||||
virtual bool isReady () = 0;
|
||||
virtual MemoryNode peekNode () = 0;
|
||||
virtual bool areNSlotsAvailable (int n) = 0;
|
||||
virtual void printConfig (std::ostream& out) = 0;
|
||||
virtual void print (std::ostream& out) const = 0;
|
||||
};
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_ABSTRACTMEMORCACHE_HH__
|
||||
@ -0,0 +1,89 @@
|
||||
/*
|
||||
* Copyright (c) 2007 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__
|
||||
#define __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__
|
||||
|
||||
#include "mem/ruby/common/TypeDefines.hh"
|
||||
|
||||
class AbstractReplacementPolicy
|
||||
{
|
||||
public:
|
||||
AbstractReplacementPolicy(Index num_sets, Index assoc);
|
||||
virtual ~AbstractReplacementPolicy();
|
||||
|
||||
/* touch a block. a.k.a. update timestamp */
|
||||
virtual void touch(Index set, Index way, Time time) = 0;
|
||||
|
||||
/* returns the way to replace */
|
||||
virtual Index getVictim(Index set) const = 0;
|
||||
|
||||
/* get the time of the last access */
|
||||
Time getLastAccess(Index set, Index way);
|
||||
|
||||
protected:
|
||||
unsigned m_num_sets; /** total number of sets */
|
||||
unsigned m_assoc; /** set associativity */
|
||||
Time **m_last_ref_ptr; /** timestamp of last reference */
|
||||
};
|
||||
|
||||
inline
|
||||
AbstractReplacementPolicy::AbstractReplacementPolicy(Index num_sets,
|
||||
Index assoc)
|
||||
{
|
||||
m_num_sets = num_sets;
|
||||
m_assoc = assoc;
|
||||
m_last_ref_ptr = new Time*[m_num_sets];
|
||||
for(unsigned i = 0; i < m_num_sets; i++){
|
||||
m_last_ref_ptr[i] = new Time[m_assoc];
|
||||
for(unsigned j = 0; j < m_assoc; j++){
|
||||
m_last_ref_ptr[i][j] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline
|
||||
AbstractReplacementPolicy::~AbstractReplacementPolicy()
|
||||
{
|
||||
if (m_last_ref_ptr != NULL){
|
||||
for (unsigned i = 0; i < m_num_sets; i++){
|
||||
if (m_last_ref_ptr[i] != NULL){
|
||||
delete[] m_last_ref_ptr[i];
|
||||
}
|
||||
}
|
||||
delete[] m_last_ref_ptr;
|
||||
}
|
||||
}
|
||||
|
||||
inline Time
|
||||
AbstractReplacementPolicy::getLastAccess(Index set, Index way)
|
||||
{
|
||||
return m_last_ref_ptr[set][way];
|
||||
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__
|
||||
42
simulators/gem5/src/mem/ruby/system/Cache.py
Normal file
42
simulators/gem5/src/mem/ruby/system/Cache.py
Normal file
@ -0,0 +1,42 @@
|
||||
# Copyright (c) 2009 Advanced Micro Devices, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met: redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer;
|
||||
# redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution;
|
||||
# neither the name of the copyright holders nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Authors: Steve Reinhardt
|
||||
# Brad Beckmann
|
||||
|
||||
from m5.params import *
|
||||
from m5.SimObject import SimObject
|
||||
from Controller import RubyController
|
||||
|
||||
class RubyCache(SimObject):
|
||||
type = 'RubyCache'
|
||||
cxx_class = 'CacheMemory'
|
||||
size = Param.MemorySize("capacity in bytes");
|
||||
latency = Param.Int("");
|
||||
assoc = Param.Int("");
|
||||
replacement_policy = Param.String("PSEUDO_LRU", "");
|
||||
start_index_bit = Param.Int(6, "index start, default 6 for 64-byte line");
|
||||
is_icache = Param.Bool(False, "is instruction only cache");
|
||||
478
simulators/gem5/src/mem/ruby/system/CacheMemory.cc
Normal file
478
simulators/gem5/src/mem/ruby/system/CacheMemory.cc
Normal file
@ -0,0 +1,478 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "base/intmath.hh"
|
||||
#include "debug/RubyCache.hh"
|
||||
#include "debug/RubyCacheTrace.hh"
|
||||
#include "mem/protocol/AccessPermission.hh"
|
||||
#include "mem/ruby/system/CacheMemory.hh"
|
||||
#include "mem/ruby/system/System.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
ostream&
|
||||
operator<<(ostream& out, const CacheMemory& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
out << flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
CacheMemory *
|
||||
RubyCacheParams::create()
|
||||
{
|
||||
return new CacheMemory(this);
|
||||
}
|
||||
|
||||
CacheMemory::CacheMemory(const Params *p)
|
||||
: SimObject(p)
|
||||
{
|
||||
m_cache_size = p->size;
|
||||
m_latency = p->latency;
|
||||
m_cache_assoc = p->assoc;
|
||||
m_policy = p->replacement_policy;
|
||||
m_profiler_ptr = new CacheProfiler(name());
|
||||
m_start_index_bit = p->start_index_bit;
|
||||
m_is_instruction_only_cache = p->is_icache;
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::init()
|
||||
{
|
||||
m_cache_num_sets = (m_cache_size / m_cache_assoc) /
|
||||
RubySystem::getBlockSizeBytes();
|
||||
assert(m_cache_num_sets > 1);
|
||||
m_cache_num_set_bits = floorLog2(m_cache_num_sets);
|
||||
assert(m_cache_num_set_bits > 0);
|
||||
|
||||
if (m_policy == "PSEUDO_LRU")
|
||||
m_replacementPolicy_ptr =
|
||||
new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
|
||||
else if (m_policy == "LRU")
|
||||
m_replacementPolicy_ptr =
|
||||
new LRUPolicy(m_cache_num_sets, m_cache_assoc);
|
||||
else
|
||||
assert(false);
|
||||
|
||||
m_cache.resize(m_cache_num_sets);
|
||||
for (int i = 0; i < m_cache_num_sets; i++) {
|
||||
m_cache[i].resize(m_cache_assoc);
|
||||
for (int j = 0; j < m_cache_assoc; j++) {
|
||||
m_cache[i][j] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
CacheMemory::~CacheMemory()
|
||||
{
|
||||
if (m_replacementPolicy_ptr != NULL)
|
||||
delete m_replacementPolicy_ptr;
|
||||
delete m_profiler_ptr;
|
||||
for (int i = 0; i < m_cache_num_sets; i++) {
|
||||
for (int j = 0; j < m_cache_assoc; j++) {
|
||||
delete m_cache[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::printConfig(ostream& out)
|
||||
{
|
||||
int block_size = RubySystem::getBlockSizeBytes();
|
||||
|
||||
out << "Cache config: " << m_cache_name << endl;
|
||||
out << " cache_associativity: " << m_cache_assoc << endl;
|
||||
out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl;
|
||||
const int cache_num_sets = 1 << m_cache_num_set_bits;
|
||||
out << " num_cache_sets: " << cache_num_sets << endl;
|
||||
out << " cache_set_size_bytes: " << cache_num_sets * block_size << endl;
|
||||
out << " cache_set_size_Kbytes: "
|
||||
<< double(cache_num_sets * block_size) / (1<<10) << endl;
|
||||
out << " cache_set_size_Mbytes: "
|
||||
<< double(cache_num_sets * block_size) / (1<<20) << endl;
|
||||
out << " cache_size_bytes: "
|
||||
<< cache_num_sets * block_size * m_cache_assoc << endl;
|
||||
out << " cache_size_Kbytes: "
|
||||
<< double(cache_num_sets * block_size * m_cache_assoc) / (1<<10)
|
||||
<< endl;
|
||||
out << " cache_size_Mbytes: "
|
||||
<< double(cache_num_sets * block_size * m_cache_assoc) / (1<<20)
|
||||
<< endl;
|
||||
}
|
||||
|
||||
// convert a Address to its location in the cache
|
||||
Index
|
||||
CacheMemory::addressToCacheSet(const Address& address) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
return address.bitSelect(m_start_index_bit,
|
||||
m_start_index_bit + m_cache_num_set_bits - 1);
|
||||
}
|
||||
|
||||
// Given a cache index: returns the index of the tag in a set.
|
||||
// returns -1 if the tag is not found.
|
||||
int
|
||||
CacheMemory::findTagInSet(Index cacheSet, const Address& tag) const
|
||||
{
|
||||
assert(tag == line_address(tag));
|
||||
// search the set for the tags
|
||||
m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
|
||||
if (it != m_tag_index.end())
|
||||
if (m_cache[cacheSet][it->second]->m_Permission !=
|
||||
AccessPermission_NotPresent)
|
||||
return it->second;
|
||||
return -1; // Not found
|
||||
}
|
||||
|
||||
// Given a cache index: returns the index of the tag in a set.
|
||||
// returns -1 if the tag is not found.
|
||||
int
|
||||
CacheMemory::findTagInSetIgnorePermissions(Index cacheSet,
|
||||
const Address& tag) const
|
||||
{
|
||||
assert(tag == line_address(tag));
|
||||
// search the set for the tags
|
||||
m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
|
||||
if (it != m_tag_index.end())
|
||||
return it->second;
|
||||
return -1; // Not found
|
||||
}
|
||||
|
||||
bool
|
||||
CacheMemory::tryCacheAccess(const Address& address, RubyRequestType type,
|
||||
DataBlock*& data_ptr)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
DPRINTF(RubyCache, "address: %s\n", address);
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
if (loc != -1) {
|
||||
// Do we even have a tag match?
|
||||
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
|
||||
m_replacementPolicy_ptr->
|
||||
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
|
||||
data_ptr = &(entry->getDataBlk());
|
||||
|
||||
if (entry->m_Permission == AccessPermission_Read_Write) {
|
||||
return true;
|
||||
}
|
||||
if ((entry->m_Permission == AccessPermission_Read_Only) &&
|
||||
(type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
|
||||
return true;
|
||||
}
|
||||
// The line must not be accessible
|
||||
}
|
||||
data_ptr = NULL;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
CacheMemory::testCacheAccess(const Address& address, RubyRequestType type,
|
||||
DataBlock*& data_ptr)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
DPRINTF(RubyCache, "address: %s\n", address);
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
|
||||
if (loc != -1) {
|
||||
// Do we even have a tag match?
|
||||
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
|
||||
m_replacementPolicy_ptr->
|
||||
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
|
||||
data_ptr = &(entry->getDataBlk());
|
||||
|
||||
return m_cache[cacheSet][loc]->m_Permission !=
|
||||
AccessPermission_NotPresent;
|
||||
}
|
||||
|
||||
data_ptr = NULL;
|
||||
return false;
|
||||
}
|
||||
|
||||
// tests to see if an address is present in the cache
|
||||
bool
|
||||
CacheMemory::isTagPresent(const Address& address) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
|
||||
if (loc == -1) {
|
||||
// We didn't find the tag
|
||||
DPRINTF(RubyCache, "No tag match for address: %s\n", address);
|
||||
return false;
|
||||
}
|
||||
DPRINTF(RubyCache, "address: %s found\n", address);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Returns true if there is:
|
||||
// a) a tag match on this address or there is
|
||||
// b) an unused line in the same cache "way"
|
||||
bool
|
||||
CacheMemory::cacheAvail(const Address& address) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
|
||||
for (int i = 0; i < m_cache_assoc; i++) {
|
||||
AbstractCacheEntry* entry = m_cache[cacheSet][i];
|
||||
if (entry != NULL) {
|
||||
if (entry->m_Address == address ||
|
||||
entry->m_Permission == AccessPermission_NotPresent) {
|
||||
// Already in the cache or we found an empty entry
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
AbstractCacheEntry*
|
||||
CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
assert(!isTagPresent(address));
|
||||
assert(cacheAvail(address));
|
||||
DPRINTF(RubyCache, "address: %s\n", address);
|
||||
|
||||
// Find the first open slot
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
|
||||
for (int i = 0; i < m_cache_assoc; i++) {
|
||||
if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
|
||||
set[i] = entry; // Init entry
|
||||
set[i]->m_Address = address;
|
||||
set[i]->m_Permission = AccessPermission_Invalid;
|
||||
DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
|
||||
address);
|
||||
set[i]->m_locked = -1;
|
||||
m_tag_index[address] = i;
|
||||
|
||||
m_replacementPolicy_ptr->
|
||||
touch(cacheSet, i, g_eventQueue_ptr->getTime());
|
||||
|
||||
return entry;
|
||||
}
|
||||
}
|
||||
panic("Allocate didn't find an available entry");
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::deallocate(const Address& address)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
assert(isTagPresent(address));
|
||||
DPRINTF(RubyCache, "address: %s\n", address);
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
if (loc != -1) {
|
||||
delete m_cache[cacheSet][loc];
|
||||
m_cache[cacheSet][loc] = NULL;
|
||||
m_tag_index.erase(address);
|
||||
}
|
||||
}
|
||||
|
||||
// Returns with the physical address of the conflicting cache line
|
||||
Address
|
||||
CacheMemory::cacheProbe(const Address& address) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
assert(!cacheAvail(address));
|
||||
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
|
||||
m_Address;
|
||||
}
|
||||
|
||||
// looks an address up in the cache
|
||||
AbstractCacheEntry*
|
||||
CacheMemory::lookup(const Address& address)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
if(loc == -1) return NULL;
|
||||
return m_cache[cacheSet][loc];
|
||||
}
|
||||
|
||||
// looks an address up in the cache
|
||||
const AbstractCacheEntry*
|
||||
CacheMemory::lookup(const Address& address) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
if(loc == -1) return NULL;
|
||||
return m_cache[cacheSet][loc];
|
||||
}
|
||||
|
||||
// Sets the most recently used bit for a cache block
|
||||
void
|
||||
CacheMemory::setMRU(const Address& address)
|
||||
{
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
|
||||
if(loc != -1)
|
||||
m_replacementPolicy_ptr->
|
||||
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::profileMiss(const RubyRequest& msg)
|
||||
{
|
||||
m_profiler_ptr->addCacheStatSample(msg.getType(),
|
||||
msg.getAccessMode(),
|
||||
msg.getPrefetch());
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::profileGenericRequest(GenericRequestType requestType,
|
||||
RubyAccessMode accessType,
|
||||
PrefetchBit pfBit)
|
||||
{
|
||||
m_profiler_ptr->addGenericStatSample(requestType,
|
||||
accessType,
|
||||
pfBit);
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
|
||||
{
|
||||
uint64 warmedUpBlocks = 0;
|
||||
uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets
|
||||
* (uint64)m_cache_assoc;
|
||||
|
||||
for (int i = 0; i < m_cache_num_sets; i++) {
|
||||
for (int j = 0; j < m_cache_assoc; j++) {
|
||||
if (m_cache[i][j] != NULL) {
|
||||
AccessPermission perm = m_cache[i][j]->m_Permission;
|
||||
RubyRequestType request_type = RubyRequestType_NULL;
|
||||
if (perm == AccessPermission_Read_Only) {
|
||||
if (m_is_instruction_only_cache) {
|
||||
request_type = RubyRequestType_IFETCH;
|
||||
} else {
|
||||
request_type = RubyRequestType_LD;
|
||||
}
|
||||
} else if (perm == AccessPermission_Read_Write) {
|
||||
request_type = RubyRequestType_ST;
|
||||
}
|
||||
|
||||
if (request_type != RubyRequestType_NULL) {
|
||||
tr->addRecord(cntrl, m_cache[i][j]->m_Address.getAddress(),
|
||||
0, request_type,
|
||||
m_replacementPolicy_ptr->getLastAccess(i, j),
|
||||
m_cache[i][j]->getDataBlk());
|
||||
warmedUpBlocks++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
|
||||
"recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
|
||||
(uint64)m_cache_num_sets * (uint64)m_cache_assoc,
|
||||
(float(warmedUpBlocks)/float(totalBlocks))*100.0);
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::print(ostream& out) const
|
||||
{
|
||||
out << "Cache dump: " << m_cache_name << endl;
|
||||
for (int i = 0; i < m_cache_num_sets; i++) {
|
||||
for (int j = 0; j < m_cache_assoc; j++) {
|
||||
if (m_cache[i][j] != NULL) {
|
||||
out << " Index: " << i
|
||||
<< " way: " << j
|
||||
<< " entry: " << *m_cache[i][j] << endl;
|
||||
} else {
|
||||
out << " Index: " << i
|
||||
<< " way: " << j
|
||||
<< " entry: NULL" << endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::printData(ostream& out) const
|
||||
{
|
||||
out << "printData() not supported" << endl;
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::clearStats() const
|
||||
{
|
||||
m_profiler_ptr->clearStats();
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::printStats(ostream& out) const
|
||||
{
|
||||
m_profiler_ptr->printStats(out);
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::setLocked(const Address& address, int context)
|
||||
{
|
||||
DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
|
||||
assert(address == line_address(address));
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
assert(loc != -1);
|
||||
m_cache[cacheSet][loc]->m_locked = context;
|
||||
}
|
||||
|
||||
void
|
||||
CacheMemory::clearLocked(const Address& address)
|
||||
{
|
||||
DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
|
||||
assert(address == line_address(address));
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
assert(loc != -1);
|
||||
m_cache[cacheSet][loc]->m_locked = -1;
|
||||
}
|
||||
|
||||
bool
|
||||
CacheMemory::isLocked(const Address& address, int context)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
Index cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
assert(loc != -1);
|
||||
DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
|
||||
address, m_cache[cacheSet][loc]->m_locked, context);
|
||||
return m_cache[cacheSet][loc]->m_locked == context;
|
||||
}
|
||||
|
||||
157
simulators/gem5/src/mem/ruby/system/CacheMemory.hh
Normal file
157
simulators/gem5/src/mem/ruby/system/CacheMemory.hh
Normal file
@ -0,0 +1,157 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
|
||||
#define __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
|
||||
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "base/hashmap.hh"
|
||||
#include "mem/protocol/GenericRequestType.hh"
|
||||
#include "mem/protocol/RubyRequest.hh"
|
||||
#include "mem/ruby/common/DataBlock.hh"
|
||||
#include "mem/ruby/profiler/CacheProfiler.hh"
|
||||
#include "mem/ruby/recorder/CacheRecorder.hh"
|
||||
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
|
||||
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
|
||||
#include "mem/ruby/system/LRUPolicy.hh"
|
||||
#include "mem/ruby/system/PseudoLRUPolicy.hh"
|
||||
#include "params/RubyCache.hh"
|
||||
#include "sim/sim_object.hh"
|
||||
|
||||
class CacheMemory : public SimObject
|
||||
{
|
||||
public:
|
||||
typedef RubyCacheParams Params;
|
||||
CacheMemory(const Params *p);
|
||||
~CacheMemory();
|
||||
|
||||
void init();
|
||||
|
||||
// Public Methods
|
||||
void printConfig(std::ostream& out);
|
||||
|
||||
// perform a cache access and see if we hit or not. Return true on a hit.
|
||||
bool tryCacheAccess(const Address& address, RubyRequestType type,
|
||||
DataBlock*& data_ptr);
|
||||
|
||||
// similar to above, but doesn't require full access check
|
||||
bool testCacheAccess(const Address& address, RubyRequestType type,
|
||||
DataBlock*& data_ptr);
|
||||
|
||||
// tests to see if an address is present in the cache
|
||||
bool isTagPresent(const Address& address) const;
|
||||
|
||||
// Returns true if there is:
|
||||
// a) a tag match on this address or there is
|
||||
// b) an unused line in the same cache "way"
|
||||
bool cacheAvail(const Address& address) const;
|
||||
|
||||
// find an unused entry and sets the tag appropriate for the address
|
||||
AbstractCacheEntry* allocate(const Address& address, AbstractCacheEntry* new_entry);
|
||||
void allocateVoid(const Address& address, AbstractCacheEntry* new_entry)
|
||||
{
|
||||
allocate(address, new_entry);
|
||||
}
|
||||
|
||||
// Explicitly free up this address
|
||||
void deallocate(const Address& address);
|
||||
|
||||
// Returns with the physical address of the conflicting cache line
|
||||
Address cacheProbe(const Address& address) const;
|
||||
|
||||
// looks an address up in the cache
|
||||
AbstractCacheEntry* lookup(const Address& address);
|
||||
const AbstractCacheEntry* lookup(const Address& address) const;
|
||||
|
||||
int getLatency() const { return m_latency; }
|
||||
|
||||
// Hook for checkpointing the contents of the cache
|
||||
void recordCacheContents(int cntrl, CacheRecorder* tr) const;
|
||||
|
||||
// Set this address to most recently used
|
||||
void setMRU(const Address& address);
|
||||
|
||||
void profileMiss(const RubyRequest & msg);
|
||||
|
||||
void profileGenericRequest(GenericRequestType requestType,
|
||||
RubyAccessMode accessType,
|
||||
PrefetchBit pfBit);
|
||||
|
||||
void setLocked (const Address& addr, int context);
|
||||
void clearLocked (const Address& addr);
|
||||
bool isLocked (const Address& addr, int context);
|
||||
// Print cache contents
|
||||
void print(std::ostream& out) const;
|
||||
void printData(std::ostream& out) const;
|
||||
|
||||
void clearStats() const;
|
||||
void printStats(std::ostream& out) const;
|
||||
|
||||
private:
|
||||
// convert a Address to its location in the cache
|
||||
Index addressToCacheSet(const Address& address) const;
|
||||
|
||||
// Given a cache tag: returns the index of the tag in a set.
|
||||
// returns -1 if the tag is not found.
|
||||
int findTagInSet(Index line, const Address& tag) const;
|
||||
int findTagInSetIgnorePermissions(Index cacheSet,
|
||||
const Address& tag) const;
|
||||
|
||||
// Private copy constructor and assignment operator
|
||||
CacheMemory(const CacheMemory& obj);
|
||||
CacheMemory& operator=(const CacheMemory& obj);
|
||||
|
||||
private:
|
||||
const std::string m_cache_name;
|
||||
int m_latency;
|
||||
|
||||
// Data Members (m_prefix)
|
||||
bool m_is_instruction_only_cache;
|
||||
|
||||
// The first index is the # of cache lines.
|
||||
// The second index is the the amount associativity.
|
||||
m5::hash_map<Address, int> m_tag_index;
|
||||
std::vector<std::vector<AbstractCacheEntry*> > m_cache;
|
||||
|
||||
AbstractReplacementPolicy *m_replacementPolicy_ptr;
|
||||
|
||||
CacheProfiler* m_profiler_ptr;
|
||||
|
||||
int m_cache_size;
|
||||
std::string m_policy;
|
||||
int m_cache_num_sets;
|
||||
int m_cache_num_set_bits;
|
||||
int m_cache_assoc;
|
||||
int m_start_index_bit;
|
||||
};
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
|
||||
|
||||
175
simulators/gem5/src/mem/ruby/system/DMASequencer.cc
Normal file
175
simulators/gem5/src/mem/ruby/system/DMASequencer.cc
Normal file
@ -0,0 +1,175 @@
|
||||
/*
|
||||
* Copyright (c) 2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "debug/RubyDma.hh"
|
||||
#include "mem/protocol/SequencerMsg.hh"
|
||||
#include "mem/protocol/SequencerRequestType.hh"
|
||||
#include "mem/ruby/buffers/MessageBuffer.hh"
|
||||
#include "mem/ruby/system/DMASequencer.hh"
|
||||
#include "mem/ruby/system/System.hh"
|
||||
|
||||
// Construct a DMA sequencer; all real setup happens in init(), once the
// RubyPort base and the global Ruby system parameters are available.
DMASequencer::DMASequencer(const Params *p)
    : RubyPort(p)
{
}
|
||||
|
||||
void
|
||||
DMASequencer::init()
|
||||
{
|
||||
RubyPort::init();
|
||||
m_is_busy = false;
|
||||
m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
|
||||
}
|
||||
|
||||
// Start a new DMA transaction.  Records the request in active_request
// and enqueues the first (possibly sub-block, unaligned) chunk into the
// mandatory queue.  Returns RequestStatus_BufferFull while a previous
// request is still in flight; otherwise RequestStatus_Issued.
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    // Only one outstanding DMA request is supported.
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }
    m_is_busy = true;

    uint64_t paddr = pkt->getAddr();
    uint8_t* data = pkt->getPtr<uint8_t>(true);
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    SequencerMsg *msg = new SequencerMsg;
    msg->getPhysicalAddress() = Address(paddr);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;

    // The first chunk may start mid-block; clamp its length so it does
    // not cross a cache-line boundary.
    int offset = paddr & m_data_block_mask;
    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    // The original code tested both "data" and "active_request.data"
    // for NULL, but they alias the same pointer; one test suffices.
    if (write && data != NULL) {
        msg->getDataBlk().setData(data, offset, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}
|
||||
|
||||
void
|
||||
DMASequencer::issueNext()
|
||||
{
|
||||
assert(m_is_busy == true);
|
||||
active_request.bytes_completed = active_request.bytes_issued;
|
||||
if (active_request.len == active_request.bytes_completed) {
|
||||
//
|
||||
// Must unset the busy flag before calling back the dma port because
|
||||
// the callback may cause a previously nacked request to be reissued
|
||||
//
|
||||
DPRINTF(RubyDma, "DMA request completed\n");
|
||||
m_is_busy = false;
|
||||
ruby_hit_callback(active_request.pkt);
|
||||
return;
|
||||
}
|
||||
|
||||
SequencerMsg *msg = new SequencerMsg;
|
||||
msg->getPhysicalAddress() = Address(active_request.start_paddr +
|
||||
active_request.bytes_completed);
|
||||
|
||||
assert((msg->getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
|
||||
msg->getLineAddress() = line_address(msg->getPhysicalAddress());
|
||||
|
||||
msg->getType() = (active_request.write ? SequencerRequestType_ST :
|
||||
SequencerRequestType_LD);
|
||||
|
||||
msg->getLen() =
|
||||
(active_request.len -
|
||||
active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
|
||||
active_request.len - active_request.bytes_completed :
|
||||
RubySystem::getBlockSizeBytes());
|
||||
|
||||
if (active_request.write) {
|
||||
msg->getDataBlk().
|
||||
setData(&active_request.data[active_request.bytes_completed],
|
||||
0, msg->getLen());
|
||||
msg->getType() = SequencerRequestType_ST;
|
||||
} else {
|
||||
msg->getType() = SequencerRequestType_LD;
|
||||
}
|
||||
|
||||
assert(m_mandatory_q_ptr != NULL);
|
||||
m_mandatory_q_ptr->enqueue(msg);
|
||||
active_request.bytes_issued += msg->getLen();
|
||||
DPRINTF(RubyDma,
|
||||
"DMA request bytes issued %d, bytes completed %d, total len %d\n",
|
||||
active_request.bytes_issued, active_request.bytes_completed,
|
||||
active_request.len);
|
||||
}
|
||||
|
||||
// SLICC callback for a completed read chunk: copy the returned data
// block into the requester's buffer, then issue the next chunk.
void
DMASequencer::dataCallback(const DataBlock & dblk)
{
    assert(m_is_busy == true);
    // Number of bytes delivered by the chunk that just finished.
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    // Only the very first chunk can start mid-block.
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    // Reads only; write completions arrive through ackCallback().
    assert(active_request.write == false);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}
|
||||
|
||||
// SLICC callback for a completed write chunk: nothing to copy back,
// just advance to the next chunk (or finish the request).
void
DMASequencer::ackCallback()
{
    issueNext();
}
|
||||
|
||||
// Intentionally empty: the DMASequencer has no extra configuration of
// its own to report.
void
DMASequencer::printConfig(std::ostream & out)
{
}
|
||||
|
||||
// Python-configuration factory hook: build a DMASequencer from its
// generated parameter object.
DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}
|
||||
78
simulators/gem5/src/mem/ruby/system/DMASequencer.hh
Normal file
78
simulators/gem5/src/mem/ruby/system/DMASequencer.hh
Normal file
@ -0,0 +1,78 @@
|
||||
/*
|
||||
* Copyright (c) 2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_DMASEQUENCER_HH__
|
||||
#define __MEM_RUBY_SYSTEM_DMASEQUENCER_HH__
|
||||
|
||||
#include <ostream>
|
||||
|
||||
#include "mem/ruby/common/DataBlock.hh"
|
||||
#include "mem/ruby/system/RubyPort.hh"
|
||||
#include "params/DMASequencer.hh"
|
||||
|
||||
struct DMARequest
|
||||
{
|
||||
uint64_t start_paddr;
|
||||
int len;
|
||||
bool write;
|
||||
int bytes_completed;
|
||||
int bytes_issued;
|
||||
uint8* data;
|
||||
PacketPtr pkt;
|
||||
};
|
||||
|
||||
// RubyPort front-end that injects DMA traffic into the Ruby protocol.
// Supports at most one outstanding request at a time, splitting it into
// cache-line-sized chunks (see DMASequencer.cc).
class DMASequencer : public RubyPort
{
  public:
    typedef DMASequencerParams Params;
    DMASequencer(const Params *);
    void init();
    /* external interface */
    // Returns RequestStatus_BufferFull while a request is in flight.
    RequestStatus makeRequest(PacketPtr pkt);
    bool busy() { return m_is_busy;}
    int outstandingCount() const { return (m_is_busy ? 1 : 0); }
    // DMA requests are not tracked by a deadlock-detection event.
    bool isDeadlockEventScheduled() const { return false; }
    void descheduleDeadlockEvent() {}

    /* SLICC callback */
    // Read data for the current chunk has arrived.
    void dataCallback(const DataBlock & dblk);
    // The current write chunk has been acknowledged.
    void ackCallback();

    void printConfig(std::ostream & out);

  private:
    // Issue the next chunk of the active request, or complete it.
    void issueNext();

  private:
    bool m_is_busy;
    // Mask selecting the byte-offset bits within one cache block.
    uint64_t m_data_block_mask;
    DMARequest active_request;
    // NOTE(review): not referenced in DMASequencer.cc as visible here —
    // appears unused; confirm before removing.
    int num_active_requests;
};
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_DMASEQUENCER_HH__
|
||||
233
simulators/gem5/src/mem/ruby/system/DirectoryMemory.cc
Normal file
233
simulators/gem5/src/mem/ruby/system/DirectoryMemory.cc
Normal file
@ -0,0 +1,233 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "base/intmath.hh"
|
||||
#include "debug/RubyCache.hh"
|
||||
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
|
||||
#include "mem/ruby/system/DirectoryMemory.hh"
|
||||
#include "mem/ruby/system/System.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
int DirectoryMemory::m_num_directories = 0;
|
||||
int DirectoryMemory::m_num_directories_bits = 0;
|
||||
uint64_t DirectoryMemory::m_total_size_bytes = 0;
|
||||
int DirectoryMemory::m_numa_high_bit = 0;
|
||||
|
||||
// Construct a directory slice; geometry comes from the Python params.
// Heavyweight storage allocation is deferred to init().
DirectoryMemory::DirectoryMemory(const Params *p)
    : SimObject(p)
{
    m_version = p->version;
    m_size_bytes = p->size;
    m_size_bits = floorLog2(m_size_bytes);
    m_num_entries = 0;
    m_use_map = p->use_map;
    m_map_levels = p->map_levels;
    m_numa_high_bit = p->numa_high_bit;
    // Initialize the storage pointers.  The destructor tests
    // m_entries != NULL, but in sparse-map mode init() never assigns
    // it, so without these the destructor would read uninitialized
    // pointers.
    m_entries = NULL;
    m_sparseMemory = NULL;
    m_ram = NULL;
}
|
||||
|
||||
// Allocate the directory storage once the Ruby block size is known:
// either a multi-level sparse map or a flat array of entry pointers
// backed by the global memory vector.
void
DirectoryMemory::init()
{
    m_num_entries = m_size_bytes / RubySystem::getBlockSizeBytes();

    if (m_use_map) {
        m_sparseMemory = new SparseMemory(m_map_levels);
        g_system_ptr->registerSparseMemory(m_sparseMemory);
    } else {
        // Entries are created lazily by allocate(); start all-NULL.
        m_entries = new AbstractEntry*[m_num_entries];
        for (int i = 0; i < m_num_entries; i++)
            m_entries[i] = NULL;
        m_ram = g_system_ptr->getMemoryVector();
    }

    // Update the class-wide aggregates shared by all directories.
    m_num_directories++;
    m_num_directories_bits = floorLog2(m_num_directories);
    m_total_size_bytes += m_size_bytes;

    // A zero numa_high_bit means "default to the top physical bit".
    if (m_numa_high_bit == 0) {
        m_numa_high_bit = RubySystem::getMemorySizeBits() - 1;
    }
    assert(m_numa_high_bit != 0);
}
|
||||
|
||||
// Free the directory storage.  Branch on m_use_map first: in sparse
// mode init() never assigns m_entries, so the original
// "m_entries != NULL" test read an uninitialized pointer.
DirectoryMemory::~DirectoryMemory()
{
    if (m_use_map) {
        delete m_sparseMemory;
    } else if (m_entries != NULL) {
        // free up all the directory entries
        for (uint64 i = 0; i < m_num_entries; i++) {
            if (m_entries[i] != NULL) {
                delete m_entries[i];
            }
        }
        delete [] m_entries;
    }
}
|
||||
|
||||
// Dump this directory's geometry in several human-friendly units.
void
DirectoryMemory::printConfig(ostream& out) const
{
    out << "DirectoryMemory module config: " << m_name << endl
        << " version: " << m_version << endl
        << " memory_bits: " << m_size_bits << endl
        << " memory_size_bytes: " << m_size_bytes << endl
        << " memory_size_Kbytes: " << double(m_size_bytes) / (1<<10) << endl
        << " memory_size_Mbytes: " << double(m_size_bytes) / (1<<20) << endl
        << " memory_size_Gbytes: " << double(m_size_bytes) / (1<<30) << endl;
}
|
||||
|
||||
// Static method
// Dump the configuration aggregated across every DirectoryMemory
// instance (counts and sizes are class-wide statics).
void
DirectoryMemory::printGlobalConfig(ostream & out)
{
    out << "DirectoryMemory Global Config: " << endl;
    out << "  number of directory memories: " << m_num_directories << endl;
    if (m_num_directories > 1) {
        out << "  number of selection bits: " << m_num_directories_bits << endl
            << "  selection bits: " << m_numa_high_bit
            << "-" << m_numa_high_bit-m_num_directories_bits
            << endl;
    }
    out << "  total memory size bytes: " << m_total_size_bytes << endl;
    out << "  total memory bits: " << floorLog2(m_total_size_bytes) << endl;
}
|
||||
|
||||
// Decide which directory instance owns a physical address by
// extracting the NUMA selection bits just below m_numa_high_bit.
uint64
DirectoryMemory::mapAddressToDirectoryVersion(PhysAddress address)
{
    // With a single directory there are no selection bits to extract.
    if (m_num_directories_bits == 0)
        return 0;

    return address.bitSelect(m_numa_high_bit - m_num_directories_bits + 1,
                             m_numa_high_bit);
}
|
||||
|
||||
bool
|
||||
DirectoryMemory::isPresent(PhysAddress address)
|
||||
{
|
||||
bool ret = (mapAddressToDirectoryVersion(address) == m_version);
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Convert a physical address into this directory's local entry index:
// remove the NUMA selection bits (if any), then drop the block offset.
uint64
DirectoryMemory::mapAddressToLocalIdx(PhysAddress address)
{
    uint64 stripped =
        (m_num_directories_bits > 0)
        ? address.bitRemove(m_numa_high_bit - m_num_directories_bits + 1,
                            m_numa_high_bit)
        : address.getAddress();

    return stripped >> RubySystem::getBlockSizeBits();
}
|
||||
|
||||
// Return the directory entry for an address this directory owns.
// In flat mode this may be NULL until allocate() has been called.
AbstractEntry*
DirectoryMemory::lookup(PhysAddress address)
{
    assert(isPresent(address));
    DPRINTF(RubyCache, "Looking up address: %s\n", address);

    if (m_use_map) {
        return m_sparseMemory->lookup(address);
    } else {
        uint64_t idx = mapAddressToLocalIdx(address);
        assert(idx < m_num_entries);
        return m_entries[idx];
    }
}
|
||||
|
||||
// Install a directory entry for an address this directory owns and
// return it.  In flat mode the entry's data block is pointed at the
// global memory vector's backing storage for that block.
AbstractEntry*
DirectoryMemory::allocate(const PhysAddress& address, AbstractEntry* entry)
{
    assert(isPresent(address));
    uint64 idx;
    DPRINTF(RubyCache, "Looking up address: %s\n", address);

    if (m_use_map) {
        m_sparseMemory->add(address, entry);
        entry->changePermission(AccessPermission_Read_Write);
    } else {
        idx = mapAddressToLocalIdx(address);
        assert(idx < m_num_entries);
        // Alias the entry's data block onto the RAM backing store.
        entry->getDataBlk().assign(m_ram->getBlockPtr(address));
        entry->changePermission(AccessPermission_Read_Only);
        m_entries[idx] = entry;
    }

    return entry;
}
|
||||
|
||||
// Remove the directory entry for an address.  Only the sparse
// implementation actually frees anything; the flat-array path below
// has been disabled with #if 0.
void
DirectoryMemory::invalidateBlock(PhysAddress address)
{
    if (m_use_map) {
        assert(m_sparseMemory->exist(address));
        m_sparseMemory->remove(address);
    }
#if 0
    else {
        assert(isPresent(address));

        Index index = address.memoryModuleIndex();

        if (index < 0 || index > m_size) {
            ERROR_MSG("Directory Memory Assertion: "
                      "accessing memory out of range.");
        }

        if (m_entries[index] != NULL){
            delete m_entries[index];
            m_entries[index] = NULL;
        }
    }
#endif
}
|
||||
|
||||
// Intentionally empty: per-entry dumping is not implemented.
void
DirectoryMemory::print(ostream& out) const
{
}
|
||||
|
||||
// Only the sparse implementation collects statistics worth printing.
void
DirectoryMemory::printStats(ostream& out) const
{
    if (m_use_map) {
        m_sparseMemory->printStats(out);
    }
}
|
||||
|
||||
// Python-configuration factory hook: build a DirectoryMemory from its
// generated parameter object.
DirectoryMemory *
RubyDirectoryMemoryParams::create()
{
    return new DirectoryMemory(this);
}
|
||||
103
simulators/gem5/src/mem/ruby/system/DirectoryMemory.hh
Normal file
103
simulators/gem5/src/mem/ruby/system/DirectoryMemory.hh
Normal file
@ -0,0 +1,103 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
|
||||
#define __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
|
||||
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
#include "mem/ruby/slicc_interface/AbstractEntry.hh"
|
||||
#include "mem/ruby/system/MemoryVector.hh"
|
||||
#include "mem/ruby/system/SparseMemory.hh"
|
||||
#include "params/RubyDirectoryMemory.hh"
|
||||
#include "sim/sim_object.hh"
|
||||
|
||||
// One slice of the distributed directory: owns the coherence state for
// the memory blocks whose NUMA selection bits match its version.
// Storage is either a flat array of entry pointers or a SparseMemory
// map (selected by the use_map parameter).
class DirectoryMemory : public SimObject
{
  public:
    typedef RubyDirectoryMemoryParams Params;
    DirectoryMemory(const Params *p);
    ~DirectoryMemory();

    void init();

    // Entry index local to this slice (block offset stripped).
    uint64 mapAddressToLocalIdx(PhysAddress address);
    // Which directory instance owns the address.
    static uint64 mapAddressToDirectoryVersion(PhysAddress address);

    bool isSparseImplementation() { return m_use_map; }
    uint64 getSize() { return m_size_bytes; }

    void printConfig(std::ostream& out) const;
    static void printGlobalConfig(std::ostream & out);
    bool isPresent(PhysAddress address);
    AbstractEntry* lookup(PhysAddress address);
    AbstractEntry* allocate(const PhysAddress& address,
                            AbstractEntry* new_entry);

    void invalidateBlock(PhysAddress address);

    void print(std::ostream& out) const;
    void printStats(std::ostream& out) const;

  private:
    // Private copy constructor and assignment operator
    DirectoryMemory(const DirectoryMemory& obj);
    DirectoryMemory& operator=(const DirectoryMemory& obj);

  private:
    const std::string m_name;
    AbstractEntry **m_entries;
    // int m_size;  // # of memory module blocks this directory is
    //              // responsible for
    uint64 m_size_bytes;
    uint64 m_size_bits;
    uint64 m_num_entries;
    int m_version;

    // Aggregates shared by every DirectoryMemory instance.
    static int m_num_directories;
    static int m_num_directories_bits;
    static uint64_t m_total_size_bytes;
    static int m_numa_high_bit;

    MemoryVector* m_ram;          // flat-mode backing store
    SparseMemory* m_sparseMemory; // sparse-mode storage
    bool m_use_map;               // true selects the sparse implementation
    int m_map_levels;             // depth of the sparse map
};
|
||||
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& out, const DirectoryMemory& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
out << std::flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
|
||||
43
simulators/gem5/src/mem/ruby/system/DirectoryMemory.py
Normal file
43
simulators/gem5/src/mem/ruby/system/DirectoryMemory.py
Normal file
@ -0,0 +1,43 @@
|
||||
# Copyright (c) 2009 Advanced Micro Devices, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met: redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer;
|
||||
# redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution;
|
||||
# neither the name of the copyright holders nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Authors: Steve Reinhardt
|
||||
# Brad Beckmann
|
||||
|
||||
from m5.params import *
|
||||
from m5.proxy import *
|
||||
from m5.SimObject import SimObject
|
||||
|
||||
class RubyDirectoryMemory(SimObject):
    """Python-side parameter object for the C++ DirectoryMemory SimObject."""
    type = 'RubyDirectoryMemory'
    cxx_class = 'DirectoryMemory'
    # Which directory slice this instance is (matched against the
    # address's NUMA selection bits in C++).
    version = Param.Int(0, "")
    size = Param.MemorySize("1GB", "capacity in bytes")
    use_map = Param.Bool(False, "enable sparse memory")
    map_levels = Param.Int(4, "sparse memory map levels")
    # the default value of the numa high bit is specified in the command line
    # option and must be passed into the directory memory sim object
    numa_high_bit = Param.Int("numa high bit")
|
||||
95
simulators/gem5/src/mem/ruby/system/LRUPolicy.hh
Normal file
95
simulators/gem5/src/mem/ruby/system/LRUPolicy.hh
Normal file
@ -0,0 +1,95 @@
|
||||
/*
|
||||
* Copyright (c) 2007 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_LRUPOLICY_HH__
|
||||
#define __MEM_RUBY_SYSTEM_LRUPOLICY_HH__
|
||||
|
||||
#include "mem/ruby/system/AbstractReplacementPolicy.hh"
|
||||
|
||||
/* Simple true LRU replacement policy */
|
||||
|
||||
class LRUPolicy : public AbstractReplacementPolicy
|
||||
{
|
||||
public:
|
||||
LRUPolicy(Index num_sets, Index assoc);
|
||||
~LRUPolicy();
|
||||
|
||||
void touch(Index set, Index way, Time time);
|
||||
Index getVictim(Index set) const;
|
||||
};
|
||||
|
||||
inline
|
||||
LRUPolicy::LRUPolicy(Index num_sets, Index assoc)
|
||||
: AbstractReplacementPolicy(num_sets, assoc)
|
||||
{
|
||||
}
|
||||
|
||||
inline
|
||||
LRUPolicy::~LRUPolicy()
|
||||
{
|
||||
}
|
||||
|
||||
inline void
|
||||
LRUPolicy::touch(Index set, Index index, Time time)
|
||||
{
|
||||
assert(index >= 0 && index < m_assoc);
|
||||
assert(set >= 0 && set < m_num_sets);
|
||||
|
||||
m_last_ref_ptr[set][index] = time;
|
||||
}
|
||||
|
||||
inline Index
|
||||
LRUPolicy::getVictim(Index set) const
|
||||
{
|
||||
// assert(m_assoc != 0);
|
||||
Time time, smallest_time;
|
||||
Index smallest_index;
|
||||
|
||||
smallest_index = 0;
|
||||
smallest_time = m_last_ref_ptr[set][0];
|
||||
|
||||
for (unsigned i = 0; i < m_assoc; i++) {
|
||||
time = m_last_ref_ptr[set][i];
|
||||
// assert(m_cache[cacheSet][i].m_Permission !=
|
||||
// AccessPermission_NotPresent);
|
||||
|
||||
if (time < smallest_time) {
|
||||
smallest_index = i;
|
||||
smallest_time = time;
|
||||
}
|
||||
}
|
||||
|
||||
// DEBUG_EXPR(CACHE_COMP, MedPrio, cacheSet);
|
||||
// DEBUG_EXPR(CACHE_COMP, MedPrio, smallest_index);
|
||||
// DEBUG_EXPR(CACHE_COMP, MedPrio, m_cache[cacheSet][smallest_index]);
|
||||
// DEBUG_EXPR(CACHE_COMP, MedPrio, *this);
|
||||
|
||||
return smallest_index;
|
||||
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_LRUPOLICY_HH__
|
||||
79
simulators/gem5/src/mem/ruby/system/MachineID.hh
Normal file
79
simulators/gem5/src/mem/ruby/system/MachineID.hh
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_MACHINEID_HH__
|
||||
#define __MEM_RUBY_SYSTEM_MACHINEID_HH__
|
||||
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
#include "base/cprintf.hh"
|
||||
#include "mem/protocol/MachineType.hh"
|
||||
|
||||
// Identifies one machine (controller) in the system: its type plus its
// index among the machines of that type.
struct MachineID
{
    MachineType type;
    int num; // range: 0 ... number of this machine's components in system - 1
};
|
||||
|
||||
// Render a MachineID as "<Type>_<num>".
inline std::string
MachineIDToString(MachineID machine)
{
    return csprintf("%s_%d", MachineType_to_string(machine.type), machine.num);
}
|
||||
|
||||
inline bool
|
||||
operator==(const MachineID & obj1, const MachineID & obj2)
|
||||
{
|
||||
return (obj1.type == obj2.type && obj1.num == obj2.num);
|
||||
}
|
||||
|
||||
inline bool
|
||||
operator!=(const MachineID & obj1, const MachineID & obj2)
|
||||
{
|
||||
return (obj1.type != obj2.type || obj1.num != obj2.num);
|
||||
}
|
||||
|
||||
// Output operator declaration
|
||||
std::ostream& operator<<(std::ostream& out, const MachineID& obj);
|
||||
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& out, const MachineID& obj)
|
||||
{
|
||||
if ((obj.type < MachineType_NUM) && (obj.type >= MachineType_FIRST)) {
|
||||
out << MachineType_to_string(obj.type);
|
||||
} else {
|
||||
out << "NULL";
|
||||
}
|
||||
out << "-";
|
||||
out << obj.num;
|
||||
out << std::flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_MACHINEID_HH__
|
||||
676
simulators/gem5/src/mem/ruby/system/MemoryControl.cc
Normal file
676
simulators/gem5/src/mem/ruby/system/MemoryControl.cc
Normal file
@ -0,0 +1,676 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Description: This module simulates a basic DDR-style memory controller
|
||||
* (and can easily be extended to do FB-DIMM as well).
|
||||
*
|
||||
* This module models a single channel, connected to any number of
|
||||
* DIMMs with any number of ranks of DRAMs each. If you want multiple
|
||||
* address/data channels, you need to instantiate multiple copies of
|
||||
* this module.
|
||||
*
|
||||
* Each memory request is placed in a queue associated with a specific
|
||||
* memory bank. This queue is of finite size; if the queue is full
|
||||
* the request will back up in an (infinite) common queue and will
|
||||
* effectively throttle the whole system. This sort of behavior is
|
||||
* intended to be closer to real system behavior than if we had an
|
||||
* infinite queue on each bank. If you want the latter, just make
|
||||
* the bank queues unreasonably large.
|
||||
*
|
||||
* The head item on a bank queue is issued when all of the
|
||||
* following are true:
|
||||
* the bank is available
|
||||
* the address path to the DIMM is available
|
||||
* the data path to or from the DIMM is available
|
||||
*
|
||||
* Note that we are not concerned about fixed offsets in time. The bank
|
||||
* will not be used at the same moment as the address path, but since
|
||||
* there is no queue in the DIMM or the DRAM it will be used at a constant
|
||||
* number of cycles later, so it is treated as if it is used at the same
|
||||
* time.
|
||||
*
|
||||
* We are assuming closed bank policy; that is, we automatically close
|
||||
* each bank after a single read or write. Adding an option for open
|
||||
* bank policy is for future work.
|
||||
*
|
||||
* We are assuming "posted CAS"; that is, we send the READ or WRITE
|
||||
* immediately after the ACTIVATE. This makes scheduling the address
|
||||
* bus trivial; we always schedule a fixed set of cycles. For DDR-400,
|
||||
* this is a set of two cycles; for some configurations such as
|
||||
* DDR-800 the parameter tRRD forces this to be set to three cycles.
|
||||
*
|
||||
* We assume a four-bit-time transfer on the data wires. This is
|
||||
* the minimum burst length for DDR-2. This would correspond
|
||||
* to (for example) a memory where each DIMM is 72 bits wide
|
||||
* and DIMMs are ganged in pairs to deliver 64 bytes at a shot.
|
||||
* This gives us the same occupancy on the data wires as on the
|
||||
* address wires (for the two-address-cycle case).
|
||||
*
|
||||
* The only non-trivial scheduling problem is the data wires.
|
||||
* A write will use the wires earlier in the operation than a read
|
||||
* will; typically one cycle earlier as seen at the DRAM, but earlier
|
||||
* by a worst-case round-trip wire delay when seen at the memory controller.
|
||||
* So, while reads from one rank can be scheduled back-to-back
|
||||
* every two cycles, and writes (to any rank) scheduled every two cycles,
|
||||
* when a read is followed by a write we need to insert a bubble.
|
||||
* Furthermore, consecutive reads from two different ranks may need
|
||||
* to insert a bubble due to skew between when one DRAM stops driving the
|
||||
* wires and when the other one starts. (These bubbles are parameters.)
|
||||
*
|
||||
* This means that when some number of reads and writes are at the
|
||||
* heads of their queues, reads could starve writes, and/or reads
|
||||
* to the same rank could starve out other requests, since the others
|
||||
* would never see the data bus ready.
|
||||
* For this reason, we have implemented an anti-starvation feature.
|
||||
* A group of requests is marked "old", and a counter is incremented
|
||||
* each cycle as long as any request from that batch has not issued.
|
||||
* if the counter reaches twice the bank busy time, we hold off any
|
||||
* newer requests until all of the "old" requests have issued.
|
||||
*
|
||||
* We also model tFAW. This is an obscure DRAM parameter that says
|
||||
* that no more than four activate requests can happen within a window
|
||||
* of a certain size. For most configurations this does not come into play,
|
||||
* or has very little effect, but it could be used to throttle the power
|
||||
* consumption of the DRAM. In this implementation (unlike in a DRAM
|
||||
* data sheet) TFAW is measured in memory bus cycles; i.e. if TFAW = 16
|
||||
* then no more than four activates may happen within any 16 cycle window.
|
||||
* Refreshes are included in the activates.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "base/cast.hh"
|
||||
#include "base/cprintf.hh"
|
||||
#include "mem/ruby/common/Consumer.hh"
|
||||
#include "mem/ruby/common/Global.hh"
|
||||
#include "mem/ruby/network/Network.hh"
|
||||
#include "mem/ruby/profiler/Profiler.hh"
|
||||
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
|
||||
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
|
||||
#include "mem/ruby/system/MemoryControl.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
class Consumer;
|
||||
|
||||
// Value to reset watchdog timer to.
|
||||
// If we're idle for this many memory control cycles,
|
||||
// shut down our clock (our rescheduling of ourselves).
|
||||
// Refresh shuts down as well.
|
||||
// When we restart, we'll be in a different phase
|
||||
// with respect to ruby cycles, so this introduces
|
||||
// a slight inaccuracy. But it is necessary or the
|
||||
// ruby tester never terminates because the event
|
||||
// queue is never empty.
|
||||
#define IDLECOUNT_MAX_VALUE 1000
|
||||
|
||||
// Output operator definition
|
||||
|
||||
ostream&
|
||||
operator<<(ostream& out, const MemoryControl& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
out << flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
|
||||
// ****************************************************************
|
||||
|
||||
// CONSTRUCTOR
|
||||
MemoryControl::MemoryControl(const Params *p)
|
||||
: SimObject(p), m_event(this)
|
||||
{
|
||||
m_mem_bus_cycle_multiplier = p->mem_bus_cycle_multiplier;
|
||||
m_banks_per_rank = p->banks_per_rank;
|
||||
m_ranks_per_dimm = p->ranks_per_dimm;
|
||||
m_dimms_per_channel = p->dimms_per_channel;
|
||||
m_bank_bit_0 = p->bank_bit_0;
|
||||
m_rank_bit_0 = p->rank_bit_0;
|
||||
m_dimm_bit_0 = p->dimm_bit_0;
|
||||
m_bank_queue_size = p->bank_queue_size;
|
||||
m_bank_busy_time = p->bank_busy_time;
|
||||
m_rank_rank_delay = p->rank_rank_delay;
|
||||
m_read_write_delay = p->read_write_delay;
|
||||
m_basic_bus_busy_time = p->basic_bus_busy_time;
|
||||
m_mem_ctl_latency = p->mem_ctl_latency;
|
||||
m_refresh_period = p->refresh_period;
|
||||
m_tFaw = p->tFaw;
|
||||
m_mem_random_arbitrate = p->mem_random_arbitrate;
|
||||
m_mem_fixed_delay = p->mem_fixed_delay;
|
||||
|
||||
m_profiler_ptr = new MemCntrlProfiler(name(),
|
||||
m_banks_per_rank,
|
||||
m_ranks_per_dimm,
|
||||
m_dimms_per_channel);
|
||||
}
|
||||
|
||||
void
|
||||
MemoryControl::init()
|
||||
{
|
||||
m_msg_counter = 0;
|
||||
|
||||
assert(m_tFaw <= 62); // must fit in a uint64 shift register
|
||||
|
||||
m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
|
||||
m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
|
||||
m_refresh_period_system = m_refresh_period / m_total_banks;
|
||||
|
||||
m_bankQueues = new list<MemoryNode> [m_total_banks];
|
||||
assert(m_bankQueues);
|
||||
|
||||
m_bankBusyCounter = new int [m_total_banks];
|
||||
assert(m_bankBusyCounter);
|
||||
|
||||
m_oldRequest = new int [m_total_banks];
|
||||
assert(m_oldRequest);
|
||||
|
||||
for (int i = 0; i < m_total_banks; i++) {
|
||||
m_bankBusyCounter[i] = 0;
|
||||
m_oldRequest[i] = 0;
|
||||
}
|
||||
|
||||
m_busBusyCounter_Basic = 0;
|
||||
m_busBusyCounter_Write = 0;
|
||||
m_busBusyCounter_ReadNewRank = 0;
|
||||
m_busBusy_WhichRank = 0;
|
||||
|
||||
m_roundRobin = 0;
|
||||
m_refresh_count = 1;
|
||||
m_need_refresh = 0;
|
||||
m_refresh_bank = 0;
|
||||
m_idleCount = 0;
|
||||
m_ageCounter = 0;
|
||||
|
||||
// Each tfaw shift register keeps a moving bit pattern
|
||||
// which shows when recent activates have occurred.
|
||||
// m_tfaw_count keeps track of how many 1 bits are set
|
||||
// in each shift register. When m_tfaw_count is >= 4,
|
||||
// new activates are not allowed.
|
||||
m_tfaw_shift = new uint64[m_total_ranks];
|
||||
m_tfaw_count = new int[m_total_ranks];
|
||||
for (int i = 0; i < m_total_ranks; i++) {
|
||||
m_tfaw_shift[i] = 0;
|
||||
m_tfaw_count[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
MemoryControl::~MemoryControl()
|
||||
{
|
||||
delete [] m_bankQueues;
|
||||
delete [] m_bankBusyCounter;
|
||||
delete [] m_oldRequest;
|
||||
delete m_profiler_ptr;
|
||||
}
|
||||
|
||||
// enqueue new request from directory
|
||||
void
|
||||
MemoryControl::enqueue(const MsgPtr& message, int latency)
|
||||
{
|
||||
Time current_time = g_eventQueue_ptr->getTime();
|
||||
Time arrival_time = current_time + latency;
|
||||
const MemoryMsg* memMess = safe_cast<const MemoryMsg*>(message.get());
|
||||
physical_address_t addr = memMess->getAddress().getAddress();
|
||||
MemoryRequestType type = memMess->getType();
|
||||
bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
|
||||
MemoryNode thisReq(arrival_time, message, addr, is_mem_read, !is_mem_read);
|
||||
enqueueMemRef(thisReq);
|
||||
}
|
||||
|
||||
// Alternate entry point used when we already have a MemoryNode
|
||||
// structure built.
|
||||
void
|
||||
MemoryControl::enqueueMemRef(MemoryNode& memRef)
|
||||
{
|
||||
m_msg_counter++;
|
||||
memRef.m_msg_counter = m_msg_counter;
|
||||
physical_address_t addr = memRef.m_addr;
|
||||
int bank = getBank(addr);
|
||||
|
||||
DPRINTF(RubyMemory,
|
||||
"New memory request%7d: %#08x %c arrived at %10d bank = %3x sched %c\n",
|
||||
m_msg_counter, addr, memRef.m_is_mem_read ? 'R':'W',
|
||||
memRef.m_time * g_eventQueue_ptr->getClock(),
|
||||
bank, m_event.scheduled() ? 'Y':'N');
|
||||
|
||||
m_profiler_ptr->profileMemReq(bank);
|
||||
m_input_queue.push_back(memRef);
|
||||
|
||||
if (!m_event.scheduled()) {
|
||||
schedule(m_event, curTick() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
// dequeue, peek, and isReady are used to transfer completed requests
|
||||
// back to the directory
|
||||
void
|
||||
MemoryControl::dequeue()
|
||||
{
|
||||
assert(isReady());
|
||||
m_response_queue.pop_front();
|
||||
}
|
||||
|
||||
const Message*
|
||||
MemoryControl::peek()
|
||||
{
|
||||
MemoryNode node = peekNode();
|
||||
Message* msg_ptr = node.m_msgptr.get();
|
||||
assert(msg_ptr != NULL);
|
||||
return msg_ptr;
|
||||
}
|
||||
|
||||
MemoryNode
|
||||
MemoryControl::peekNode()
|
||||
{
|
||||
assert(isReady());
|
||||
MemoryNode req = m_response_queue.front();
|
||||
DPRINTF(RubyMemory, "Peek: memory request%7d: %#08x %c sched %c\n",
|
||||
req.m_msg_counter, req.m_addr, req.m_is_mem_read ? 'R':'W',
|
||||
m_event.scheduled() ? 'Y':'N');
|
||||
|
||||
return req;
|
||||
}
|
||||
|
||||
bool
|
||||
MemoryControl::isReady()
|
||||
{
|
||||
return ((!m_response_queue.empty()) &&
|
||||
(m_response_queue.front().m_time <= g_eventQueue_ptr->getTime()));
|
||||
}
|
||||
|
||||
void
|
||||
MemoryControl::setConsumer(Consumer* consumer_ptr)
|
||||
{
|
||||
m_consumer_ptr = consumer_ptr;
|
||||
}
|
||||
|
||||
void
|
||||
MemoryControl::print(ostream& out) const
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
MemoryControl::printConfig(ostream& out)
|
||||
{
|
||||
out << "Memory Control " << name() << ":" << endl;
|
||||
out << " Ruby cycles per memory cycle: " << m_mem_bus_cycle_multiplier
|
||||
<< endl;
|
||||
out << " Basic read latency: " << m_mem_ctl_latency << endl;
|
||||
if (m_mem_fixed_delay) {
|
||||
out << " Fixed Latency mode: Added cycles = " << m_mem_fixed_delay
|
||||
<< endl;
|
||||
} else {
|
||||
out << " Bank busy time: " << m_bank_busy_time << " memory cycles"
|
||||
<< endl;
|
||||
out << " Memory channel busy time: " << m_basic_bus_busy_time << endl;
|
||||
out << " Dead cycles between reads to different ranks: "
|
||||
<< m_rank_rank_delay << endl;
|
||||
out << " Dead cycle between a read and a write: "
|
||||
<< m_read_write_delay << endl;
|
||||
out << " tFaw (four-activate) window: " << m_tFaw << endl;
|
||||
}
|
||||
out << " Banks per rank: " << m_banks_per_rank << endl;
|
||||
out << " Ranks per DIMM: " << m_ranks_per_dimm << endl;
|
||||
out << " DIMMs per channel: " << m_dimms_per_channel << endl;
|
||||
out << " LSB of bank field in address: " << m_bank_bit_0 << endl;
|
||||
out << " LSB of rank field in address: " << m_rank_bit_0 << endl;
|
||||
out << " LSB of DIMM field in address: " << m_dimm_bit_0 << endl;
|
||||
out << " Max size of each bank queue: " << m_bank_queue_size << endl;
|
||||
out << " Refresh period (within one bank): " << m_refresh_period << endl;
|
||||
out << " Arbitration randomness: " << m_mem_random_arbitrate << endl;
|
||||
}
|
||||
|
||||
void
|
||||
MemoryControl::clearStats() const
|
||||
{
|
||||
m_profiler_ptr->clearStats();
|
||||
}
|
||||
|
||||
void
|
||||
MemoryControl::printStats(ostream& out) const
|
||||
{
|
||||
m_profiler_ptr->printStats(out);
|
||||
}
|
||||
|
||||
// Queue up a completed request to send back to directory
|
||||
void
|
||||
MemoryControl::enqueueToDirectory(MemoryNode req, int latency)
|
||||
{
|
||||
Time arrival_time = g_eventQueue_ptr->getTime()
|
||||
+ (latency * m_mem_bus_cycle_multiplier);
|
||||
req.m_time = arrival_time;
|
||||
m_response_queue.push_back(req);
|
||||
|
||||
DPRINTF(RubyMemory, "Enqueueing msg %#08x %c back to directory at %15d\n",
|
||||
req.m_addr, req.m_is_mem_read ? 'R':'W',
|
||||
arrival_time * g_eventQueue_ptr->getClock());
|
||||
|
||||
// schedule the wake up
|
||||
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
|
||||
}
|
||||
|
||||
// getBank returns an integer that is unique for each
|
||||
// bank across this memory controller.
|
||||
int
|
||||
MemoryControl::getBank(physical_address_t addr)
|
||||
{
|
||||
int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1);
|
||||
int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1);
|
||||
int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1);
|
||||
return (dimm * m_ranks_per_dimm * m_banks_per_rank)
|
||||
+ (rank * m_banks_per_rank)
|
||||
+ bank;
|
||||
}
|
||||
|
||||
// getRank returns an integer that is unique for each rank
|
||||
// and independent of individual bank.
|
||||
int
|
||||
MemoryControl::getRank(int bank)
|
||||
{
|
||||
int rank = (bank / m_banks_per_rank);
|
||||
assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
|
||||
return rank;
|
||||
}
|
||||
|
||||
// queueReady determines if the head item in a bank queue
|
||||
// can be issued this cycle
|
||||
bool
|
||||
MemoryControl::queueReady(int bank)
|
||||
{
|
||||
if ((m_bankBusyCounter[bank] > 0) && !m_mem_fixed_delay) {
|
||||
m_profiler_ptr->profileMemBankBusy();
|
||||
|
||||
DPRINTF(RubyMemory, "bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (m_mem_random_arbitrate >= 2) {
|
||||
if ((random() % 100) < m_mem_random_arbitrate) {
|
||||
m_profiler_ptr->profileMemRandBusy();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (m_mem_fixed_delay)
|
||||
return true;
|
||||
|
||||
if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
|
||||
m_profiler_ptr->profileMemNotOld();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
|
||||
// Another bank must have issued this same cycle. For
|
||||
// profiling, we count this as an arb wait rather than a bus
|
||||
// wait. This is a little inaccurate since it MIGHT have also
|
||||
// been blocked waiting for a read-write or a read-read
|
||||
// instead, but it's pretty close.
|
||||
m_profiler_ptr->profileMemArbWait(1);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (m_busBusyCounter_Basic > 0) {
|
||||
m_profiler_ptr->profileMemBusBusy();
|
||||
return false;
|
||||
}
|
||||
|
||||
int rank = getRank(bank);
|
||||
if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
|
||||
m_profiler_ptr->profileMemTfawBusy();
|
||||
return false;
|
||||
}
|
||||
|
||||
bool write = !m_bankQueues[bank].front().m_is_mem_read;
|
||||
if (write && (m_busBusyCounter_Write > 0)) {
|
||||
m_profiler_ptr->profileMemReadWriteBusy();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!write && (rank != m_busBusy_WhichRank)
|
||||
&& (m_busBusyCounter_ReadNewRank > 0)) {
|
||||
m_profiler_ptr->profileMemDataBusBusy();
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// issueRefresh checks to see if this bank has a refresh scheduled
|
||||
// and, if so, does the refresh and returns true
|
||||
bool
|
||||
MemoryControl::issueRefresh(int bank)
|
||||
{
|
||||
if (!m_need_refresh || (m_refresh_bank != bank))
|
||||
return false;
|
||||
if (m_bankBusyCounter[bank] > 0)
|
||||
return false;
|
||||
// Note that m_busBusyCounter will prevent multiple issues during
|
||||
// the same cycle, as well as on different but close cycles:
|
||||
if (m_busBusyCounter_Basic > 0)
|
||||
return false;
|
||||
int rank = getRank(bank);
|
||||
if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW)
|
||||
return false;
|
||||
|
||||
// Issue it:
|
||||
DPRINTF(RubyMemory, "Refresh bank %3x\n", bank);
|
||||
|
||||
m_profiler_ptr->profileMemRefresh();
|
||||
m_need_refresh--;
|
||||
m_refresh_bank++;
|
||||
if (m_refresh_bank >= m_total_banks)
|
||||
m_refresh_bank = 0;
|
||||
m_bankBusyCounter[bank] = m_bank_busy_time;
|
||||
m_busBusyCounter_Basic = m_basic_bus_busy_time;
|
||||
m_busBusyCounter_Write = m_basic_bus_busy_time;
|
||||
m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
|
||||
markTfaw(rank);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Mark the activate in the tFaw shift register
|
||||
void
|
||||
MemoryControl::markTfaw(int rank)
|
||||
{
|
||||
if (m_tFaw) {
|
||||
m_tfaw_shift[rank] |= (1 << (m_tFaw-1));
|
||||
m_tfaw_count[rank]++;
|
||||
}
|
||||
}
|
||||
|
||||
// Issue a memory request: Activate the bank, reserve the address and
|
||||
// data buses, and queue the request for return to the requesting
|
||||
// processor after a fixed latency.
|
||||
void
|
||||
MemoryControl::issueRequest(int bank)
|
||||
{
|
||||
int rank = getRank(bank);
|
||||
MemoryNode req = m_bankQueues[bank].front();
|
||||
m_bankQueues[bank].pop_front();
|
||||
|
||||
DPRINTF(RubyMemory, "Mem issue request%7d: %#08x %c "
|
||||
"bank=%3x sched %c\n", req.m_msg_counter, req.m_addr,
|
||||
req.m_is_mem_read? 'R':'W',
|
||||
bank, m_event.scheduled() ? 'Y':'N');
|
||||
|
||||
if (req.m_msgptr) { // don't enqueue L3 writebacks
|
||||
enqueueToDirectory(req, m_mem_ctl_latency + m_mem_fixed_delay);
|
||||
}
|
||||
m_oldRequest[bank] = 0;
|
||||
markTfaw(rank);
|
||||
m_bankBusyCounter[bank] = m_bank_busy_time;
|
||||
m_busBusy_WhichRank = rank;
|
||||
if (req.m_is_mem_read) {
|
||||
m_profiler_ptr->profileMemRead();
|
||||
m_busBusyCounter_Basic = m_basic_bus_busy_time;
|
||||
m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
|
||||
m_busBusyCounter_ReadNewRank =
|
||||
m_basic_bus_busy_time + m_rank_rank_delay;
|
||||
} else {
|
||||
m_profiler_ptr->profileMemWrite();
|
||||
m_busBusyCounter_Basic = m_basic_bus_busy_time;
|
||||
m_busBusyCounter_Write = m_basic_bus_busy_time;
|
||||
m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
|
||||
}
|
||||
}
|
||||
|
||||
// executeCycle: This function is called once per memory clock cycle
|
||||
// to simulate all the periodic hardware.
|
||||
void
|
||||
MemoryControl::executeCycle()
|
||||
{
|
||||
// Keep track of time by counting down the busy counters:
|
||||
for (int bank=0; bank < m_total_banks; bank++) {
|
||||
if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--;
|
||||
}
|
||||
if (m_busBusyCounter_Write > 0)
|
||||
m_busBusyCounter_Write--;
|
||||
if (m_busBusyCounter_ReadNewRank > 0)
|
||||
m_busBusyCounter_ReadNewRank--;
|
||||
if (m_busBusyCounter_Basic > 0)
|
||||
m_busBusyCounter_Basic--;
|
||||
|
||||
// Count down the tFAW shift registers:
|
||||
for (int rank=0; rank < m_total_ranks; rank++) {
|
||||
if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--;
|
||||
m_tfaw_shift[rank] >>= 1;
|
||||
}
|
||||
|
||||
// After time period expires, latch an indication that we need a refresh.
|
||||
// Disable refresh if in mem_fixed_delay mode.
|
||||
if (!m_mem_fixed_delay) m_refresh_count--;
|
||||
if (m_refresh_count == 0) {
|
||||
m_refresh_count = m_refresh_period_system;
|
||||
|
||||
// Are we overrunning our ability to refresh?
|
||||
assert(m_need_refresh < 10);
|
||||
m_need_refresh++;
|
||||
}
|
||||
|
||||
// If this batch of requests is all done, make a new batch:
|
||||
m_ageCounter++;
|
||||
int anyOld = 0;
|
||||
for (int bank=0; bank < m_total_banks; bank++) {
|
||||
anyOld |= m_oldRequest[bank];
|
||||
}
|
||||
if (!anyOld) {
|
||||
for (int bank=0; bank < m_total_banks; bank++) {
|
||||
if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1;
|
||||
}
|
||||
m_ageCounter = 0;
|
||||
}
|
||||
|
||||
// If randomness desired, re-randomize round-robin position each cycle
|
||||
if (m_mem_random_arbitrate) {
|
||||
m_roundRobin = random() % m_total_banks;
|
||||
}
|
||||
|
||||
// For each channel, scan round-robin, and pick an old, ready
|
||||
// request and issue it. Treat a refresh request as if it were at
|
||||
// the head of its bank queue. After we issue something, keep
|
||||
// scanning the queues just to gather statistics about how many
|
||||
// are waiting. If in mem_fixed_delay mode, we can issue more
|
||||
// than one request per cycle.
|
||||
int queueHeads = 0;
|
||||
int banksIssued = 0;
|
||||
for (int i = 0; i < m_total_banks; i++) {
|
||||
m_roundRobin++;
|
||||
if (m_roundRobin >= m_total_banks) m_roundRobin = 0;
|
||||
issueRefresh(m_roundRobin);
|
||||
int qs = m_bankQueues[m_roundRobin].size();
|
||||
if (qs > 1) {
|
||||
m_profiler_ptr->profileMemBankQ(qs-1);
|
||||
}
|
||||
if (qs > 0) {
|
||||
// we're not idle if anything is queued
|
||||
m_idleCount = IDLECOUNT_MAX_VALUE;
|
||||
queueHeads++;
|
||||
if (queueReady(m_roundRobin)) {
|
||||
issueRequest(m_roundRobin);
|
||||
banksIssued++;
|
||||
if (m_mem_fixed_delay) {
|
||||
m_profiler_ptr->profileMemWaitCycles(m_mem_fixed_delay);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// memWaitCycles is a redundant catch-all for the specific
|
||||
// counters in queueReady
|
||||
m_profiler_ptr->profileMemWaitCycles(queueHeads - banksIssued);
|
||||
|
||||
// Check input queue and move anything to bank queues if not full.
|
||||
// Since this is done here at the end of the cycle, there will
|
||||
// always be at least one cycle of latency in the bank queue. We
|
||||
// deliberately move at most one request per cycle (to simulate
|
||||
// typical hardware). Note that if one bank queue fills up, other
|
||||
// requests can get stuck behind it here.
|
||||
if (!m_input_queue.empty()) {
|
||||
// we're not idle if anything is pending
|
||||
m_idleCount = IDLECOUNT_MAX_VALUE;
|
||||
MemoryNode req = m_input_queue.front();
|
||||
int bank = getBank(req.m_addr);
|
||||
if (m_bankQueues[bank].size() < m_bank_queue_size) {
|
||||
m_input_queue.pop_front();
|
||||
m_bankQueues[bank].push_back(req);
|
||||
}
|
||||
m_profiler_ptr->profileMemInputQ(m_input_queue.size());
|
||||
}
|
||||
}
|
||||
|
||||
unsigned int
|
||||
MemoryControl::drain(Event *de)
|
||||
{
|
||||
DPRINTF(RubyMemory, "MemoryController drain\n");
|
||||
if(m_event.scheduled()) {
|
||||
deschedule(m_event);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// wakeup: This function is called once per memory controller clock cycle.
|
||||
void
|
||||
MemoryControl::wakeup()
|
||||
{
|
||||
DPRINTF(RubyMemory, "MemoryController wakeup\n");
|
||||
// execute everything
|
||||
executeCycle();
|
||||
|
||||
m_idleCount--;
|
||||
if (m_idleCount > 0) {
|
||||
assert(!m_event.scheduled());
|
||||
schedule(m_event, curTick() + m_mem_bus_cycle_multiplier);
|
||||
}
|
||||
}
|
||||
|
||||
MemoryControl *
|
||||
RubyMemoryControlParams::create()
|
||||
{
|
||||
return new MemoryControl(this);
|
||||
}
|
||||
|
||||
181
simulators/gem5/src/mem/ruby/system/MemoryControl.hh
Normal file
181
simulators/gem5/src/mem/ruby/system/MemoryControl.hh
Normal file
@ -0,0 +1,181 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__
|
||||
#define __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__
|
||||
|
||||
#include <iostream>
|
||||
#include <list>
|
||||
#include <string>
|
||||
|
||||
#include "mem/protocol/MemoryMsg.hh"
|
||||
#include "mem/ruby/common/Consumer.hh"
|
||||
#include "mem/ruby/profiler/MemCntrlProfiler.hh"
|
||||
#include "mem/ruby/slicc_interface/Message.hh"
|
||||
#include "mem/ruby/system/AbstractMemOrCache.hh"
|
||||
#include "mem/ruby/system/MemoryNode.hh"
|
||||
#include "params/RubyMemoryControl.hh"
|
||||
#include "sim/sim_object.hh"
|
||||
|
||||
// This constant is part of the definition of tFAW; see
|
||||
// the comments in header to MemoryControl.cc
|
||||
#define ACTIVATE_PER_TFAW 4
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
class Consumer;
|
||||
|
||||
class MemoryControl :
|
||||
public SimObject, public Consumer, public AbstractMemOrCache
|
||||
{
|
||||
public:
|
||||
|
||||
typedef RubyMemoryControlParams Params;
|
||||
MemoryControl(const Params *p);
|
||||
void init();
|
||||
|
||||
~MemoryControl();
|
||||
|
||||
unsigned int drain(Event *de);
|
||||
|
||||
void wakeup();
|
||||
|
||||
void setConsumer(Consumer* consumer_ptr);
|
||||
Consumer* getConsumer() { return m_consumer_ptr; };
|
||||
void setDescription(const std::string& name) { m_description = name; };
|
||||
std::string getDescription() { return m_description; };
|
||||
|
||||
// Called from the directory:
|
||||
void enqueue(const MsgPtr& message, int latency );
|
||||
void enqueueMemRef(MemoryNode& memRef);
|
||||
void dequeue();
|
||||
const Message* peek();
|
||||
MemoryNode peekNode();
|
||||
bool isReady();
|
||||
bool areNSlotsAvailable(int n) { return true; }; // infinite queue length
|
||||
|
||||
//// Called from L3 cache:
|
||||
//void writeBack(physical_address_t addr);
|
||||
|
||||
void printConfig(std::ostream& out);
|
||||
void print(std::ostream& out) const;
|
||||
void clearStats() const;
|
||||
void printStats(std::ostream& out) const;
|
||||
|
||||
//added by SS
|
||||
int getBanksPerRank() { return m_banks_per_rank; };
|
||||
int getRanksPerDimm() { return m_ranks_per_dimm; };
|
||||
int getDimmsPerChannel() { return m_dimms_per_channel; }
|
||||
|
||||
private:
|
||||
class MemCntrlEvent : public Event
|
||||
{
|
||||
public:
|
||||
MemCntrlEvent(MemoryControl* _mem_cntrl)
|
||||
{
|
||||
mem_cntrl = _mem_cntrl;
|
||||
}
|
||||
private:
|
||||
void process() { mem_cntrl->wakeup(); }
|
||||
|
||||
MemoryControl* mem_cntrl;
|
||||
};
|
||||
|
||||
void enqueueToDirectory(MemoryNode req, int latency);
|
||||
int getBank(physical_address_t addr);
|
||||
int getRank(int bank);
|
||||
bool queueReady(int bank);
|
||||
void issueRequest(int bank);
|
||||
bool issueRefresh(int bank);
|
||||
void markTfaw(int rank);
|
||||
void executeCycle();
|
||||
|
||||
// Private copy constructor and assignment operator
|
||||
MemoryControl (const MemoryControl& obj);
|
||||
MemoryControl& operator=(const MemoryControl& obj);
|
||||
|
||||
// data members
|
||||
Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
|
||||
std::string m_description;
|
||||
int m_msg_counter;
|
||||
|
||||
int m_mem_bus_cycle_multiplier;
|
||||
int m_banks_per_rank;
|
||||
int m_ranks_per_dimm;
|
||||
int m_dimms_per_channel;
|
||||
int m_bank_bit_0;
|
||||
int m_rank_bit_0;
|
||||
int m_dimm_bit_0;
|
||||
unsigned int m_bank_queue_size;
|
||||
int m_bank_busy_time;
|
||||
int m_rank_rank_delay;
|
||||
int m_read_write_delay;
|
||||
int m_basic_bus_busy_time;
|
||||
int m_mem_ctl_latency;
|
||||
int m_refresh_period;
|
||||
int m_mem_random_arbitrate;
|
||||
int m_tFaw;
|
||||
int m_mem_fixed_delay;
|
||||
|
||||
int m_total_banks;
|
||||
int m_total_ranks;
|
||||
int m_refresh_period_system;
|
||||
|
||||
// queues where memory requests live
|
||||
std::list<MemoryNode> m_response_queue;
|
||||
std::list<MemoryNode> m_input_queue;
|
||||
std::list<MemoryNode>* m_bankQueues;
|
||||
|
||||
// Each entry indicates number of address-bus cycles until bank
|
||||
// is reschedulable:
|
||||
int* m_bankBusyCounter;
|
||||
int* m_oldRequest;
|
||||
|
||||
uint64* m_tfaw_shift;
|
||||
int* m_tfaw_count;
|
||||
|
||||
// Each of these indicates number of address-bus cycles until
|
||||
// we can issue a new request of the corresponding type:
|
||||
int m_busBusyCounter_Write;
|
||||
int m_busBusyCounter_ReadNewRank;
|
||||
int m_busBusyCounter_Basic;
|
||||
|
||||
int m_busBusy_WhichRank; // which rank last granted
|
||||
int m_roundRobin; // which bank queue was last granted
|
||||
int m_refresh_count; // cycles until next refresh
|
||||
int m_need_refresh; // set whenever m_refresh_count goes to zero
|
||||
int m_refresh_bank; // which bank to refresh next
|
||||
int m_ageCounter; // age of old requests; to detect starvation
|
||||
int m_idleCount; // watchdog timer for shutting down
|
||||
|
||||
MemCntrlProfiler* m_profiler_ptr;
|
||||
|
||||
MemCntrlEvent m_event;
|
||||
};
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__
|
||||
53
simulators/gem5/src/mem/ruby/system/MemoryControl.py
Normal file
53
simulators/gem5/src/mem/ruby/system/MemoryControl.py
Normal file
@ -0,0 +1,53 @@
|
||||
# Copyright (c) 2009 Advanced Micro Devices, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met: redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer;
|
||||
# redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution;
|
||||
# neither the name of the copyright holders nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Authors: Steve Reinhardt
|
||||
# Brad Beckmann
|
||||
|
||||
from m5.params import *
|
||||
from m5.SimObject import SimObject
|
||||
|
||||
class RubyMemoryControl(SimObject):
    # Python configuration wrapper for the C++ MemoryControl model.
    # The parameter names mirror the m_* members read by the C++ side.
    # Fix: removed the C-style trailing semicolons (no-ops, but
    # un-idiomatic Python).
    type = 'RubyMemoryControl'
    cxx_class = 'MemoryControl'
    version = Param.Int("")
    mem_bus_cycle_multiplier = Param.Int(10, "")
    banks_per_rank = Param.Int(8, "")
    ranks_per_dimm = Param.Int(2, "")
    dimms_per_channel = Param.Int(2, "")
    bank_bit_0 = Param.Int(8, "")
    rank_bit_0 = Param.Int(11, "")
    dimm_bit_0 = Param.Int(12, "")
    bank_queue_size = Param.Int(12, "")
    bank_busy_time = Param.Int(11, "")
    rank_rank_delay = Param.Int(1, "")
    read_write_delay = Param.Int(2, "")
    basic_bus_busy_time = Param.Int(2, "")
    mem_ctl_latency = Param.Int(12, "")
    refresh_period = Param.Int(1560, "")
    tFaw = Param.Int(0, "")
    mem_random_arbitrate = Param.Int(0, "")
    mem_fixed_delay = Param.Int(0, "")
|
||||
41
simulators/gem5/src/mem/ruby/system/MemoryNode.cc
Normal file
41
simulators/gem5/src/mem/ruby/system/MemoryNode.cc
Normal file
@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Copyright (c) 1999 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "mem/ruby/system/MemoryNode.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
void
|
||||
MemoryNode::print(ostream& out) const
|
||||
{
|
||||
out << "[";
|
||||
out << m_time << ", ";
|
||||
out << m_msg_counter << ", ";
|
||||
out << m_msgptr << "; ";
|
||||
out << "]";
|
||||
}
|
||||
93
simulators/gem5/src/mem/ruby/system/MemoryNode.hh
Normal file
93
simulators/gem5/src/mem/ruby/system/MemoryNode.hh
Normal file
@ -0,0 +1,93 @@
|
||||
/*
|
||||
* Copyright (c) 2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Description:
|
||||
* This structure records everything known about a single
|
||||
* memory request that is queued in the memory controller.
|
||||
* It is created when the memory request first arrives
|
||||
* at a memory controller and is deleted when the underlying
|
||||
* message is enqueued to be sent back to the directory.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_MEMORYNODE_HH__
|
||||
#define __MEM_RUBY_SYSTEM_MEMORYNODE_HH__
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#include "mem/protocol/MemoryRequestType.hh"
|
||||
#include "mem/ruby/common/Global.hh"
|
||||
#include "mem/ruby/slicc_interface/Message.hh"
|
||||
|
||||
class MemoryNode
|
||||
{
|
||||
public:
|
||||
// old constructor
|
||||
MemoryNode(const Time& time, int counter, const MsgPtr& msgptr,
|
||||
const physical_address_t addr, const bool is_mem_read)
|
||||
{
|
||||
m_time = time;
|
||||
m_msg_counter = counter;
|
||||
m_msgptr = msgptr;
|
||||
m_addr = addr;
|
||||
m_is_mem_read = is_mem_read;
|
||||
m_is_dirty_wb = !is_mem_read;
|
||||
}
|
||||
|
||||
// new constructor
|
||||
MemoryNode(const Time& time, const MsgPtr& msgptr,
|
||||
const physical_address_t addr, const bool is_mem_read,
|
||||
const bool is_dirty_wb)
|
||||
{
|
||||
m_time = time;
|
||||
m_msg_counter = 0;
|
||||
m_msgptr = msgptr;
|
||||
m_addr = addr;
|
||||
m_is_mem_read = is_mem_read;
|
||||
m_is_dirty_wb = is_dirty_wb;
|
||||
}
|
||||
|
||||
void print(std::ostream& out) const;
|
||||
|
||||
Time m_time;
|
||||
int m_msg_counter;
|
||||
MsgPtr m_msgptr;
|
||||
physical_address_t m_addr;
|
||||
bool m_is_mem_read;
|
||||
bool m_is_dirty_wb;
|
||||
};
|
||||
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& out, const MemoryNode& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
out << std::flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_MEMORYNODE_HH__
|
||||
237
simulators/gem5/src/mem/ruby/system/MemoryVector.hh
Normal file
237
simulators/gem5/src/mem/ruby/system/MemoryVector.hh
Normal file
@ -0,0 +1,237 @@
|
||||
/*
|
||||
* Copyright (c) 2009 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__
|
||||
#define __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__
|
||||
|
||||
#include "base/trace.hh"
|
||||
#include "debug/RubyCacheTrace.hh"
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
|
||||
class DirectoryMemory;
|
||||
|
||||
/**
|
||||
* MemoryVector holds memory data (DRAM only)
|
||||
*/
|
||||
class MemoryVector
{
  public:
    MemoryVector();
    MemoryVector(uint32 size);
    ~MemoryVector();
    friend class DirectoryMemory;

    void resize(uint32 size);  // destructive

    void write(const Address & paddr, uint8* data, int len);
    uint8* read(const Address & paddr, uint8* data, int len);
    // Checkpoint helpers: serialize all pages into / out of a flat buffer.
    uint32 collatePages(uint8* &raw_data);
    void populatePages(uint8* raw_data);

  private:
    uint8* getBlockPtr(const PhysAddress & addr);

    uint32 m_size;     // total backing-store size in bytes
    uint8** m_pages;   // one pointer per page; NULL means all-zero page
    uint32 m_num_pages;
    const uint32 m_page_offset_mask;  // PAGE_SIZE - 1
    static const uint32 PAGE_SIZE = 4096;
};
|
||||
|
||||
inline
|
||||
MemoryVector::MemoryVector()
|
||||
: m_page_offset_mask(4095)
|
||||
{
|
||||
m_size = 0;
|
||||
m_num_pages = 0;
|
||||
m_pages = NULL;
|
||||
}
|
||||
|
||||
inline
|
||||
MemoryVector::MemoryVector(uint32 size)
|
||||
: m_page_offset_mask(4095)
|
||||
{
|
||||
resize(size);
|
||||
}
|
||||
|
||||
inline
|
||||
MemoryVector::~MemoryVector()
|
||||
{
|
||||
for (int i = 0; i < m_num_pages; i++) {
|
||||
if (m_pages[i] != 0) {
|
||||
delete [] m_pages[i];
|
||||
}
|
||||
}
|
||||
delete [] m_pages;
|
||||
}
|
||||
|
||||
inline void
|
||||
MemoryVector::resize(uint32 size)
|
||||
{
|
||||
if (m_pages != NULL){
|
||||
for (int i = 0; i < m_num_pages; i++) {
|
||||
if (m_pages[i] != 0) {
|
||||
delete [] m_pages[i];
|
||||
}
|
||||
}
|
||||
delete [] m_pages;
|
||||
}
|
||||
m_size = size;
|
||||
assert(size%PAGE_SIZE == 0);
|
||||
m_num_pages = size >> 12;
|
||||
m_pages = new uint8*[m_num_pages];
|
||||
memset(m_pages, 0, m_num_pages * sizeof(uint8*));
|
||||
}
|
||||
|
||||
inline void
|
||||
MemoryVector::write(const Address & paddr, uint8* data, int len)
|
||||
{
|
||||
assert(paddr.getAddress() + len <= m_size);
|
||||
uint32 page_num = paddr.getAddress() >> 12;
|
||||
if (m_pages[page_num] == 0) {
|
||||
bool all_zeros = true;
|
||||
for (int i = 0; i < len;i++) {
|
||||
if (data[i] != 0) {
|
||||
all_zeros = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (all_zeros)
|
||||
return;
|
||||
m_pages[page_num] = new uint8[PAGE_SIZE];
|
||||
memset(m_pages[page_num], 0, PAGE_SIZE);
|
||||
uint32 offset = paddr.getAddress() & m_page_offset_mask;
|
||||
memcpy(&m_pages[page_num][offset], data, len);
|
||||
} else {
|
||||
memcpy(&m_pages[page_num][paddr.getAddress()&m_page_offset_mask],
|
||||
data, len);
|
||||
}
|
||||
}
|
||||
|
||||
inline uint8*
MemoryVector::read(const Address & paddr, uint8* data, int len)
{
    assert(paddr.getAddress() + len <= m_size);
    uint32 page_num = paddr.getAddress() >> 12;
    if (m_pages[page_num] == 0) {
        // Page never written: its contents are implicitly zero.
        memset(data, 0, len);
        return data;
    }
    uint32 offset = paddr.getAddress() & m_page_offset_mask;
    memcpy(data, &m_pages[page_num][offset], len);
    return data;
}
|
||||
|
||||
inline uint8*
MemoryVector::getBlockPtr(const PhysAddress & paddr)
{
    uint32 page_num = paddr.getAddress() >> 12;
    uint8*& page = m_pages[page_num];
    if (page == 0) {
        // First touch: materialize the page as zeros.
        page = new uint8[PAGE_SIZE];
        memset(page, 0, PAGE_SIZE);
    }
    return page + (paddr.getAddress() & m_page_offset_mask);
}
|
||||
|
||||
/*!
|
||||
* Function for collating all the pages of the physical memory together.
|
||||
* In case a pointer for a page is NULL, this page needs only a single byte
|
||||
* to represent that the pointer is NULL. Otherwise, it needs 1 + PAGE_SIZE
|
||||
* bytes. The first represents that the page pointer is not NULL, and rest of
|
||||
* the bytes represent the data on the page.
|
||||
*/
|
||||
|
||||
inline uint32
MemoryVector::collatePages(uint8* &raw_data)
{
    // Count zero (unallocated) pages so we can size the buffer exactly.
    uint32 num_zero_pages = 0;
    for (uint32 page = 0; page < m_num_pages; ++page) {
        if (m_pages[page] == 0)
            num_zero_pages++;
    }

    // Buffer layout: [num_pages:u32][1 flag byte per page]
    //                [PAGE_SIZE data bytes per non-zero page].
    raw_data = new uint8[sizeof(uint32)
                         + m_num_pages
                         + PAGE_SIZE * (m_num_pages - num_zero_pages)];

    /* Write the number of pages to be stored. */
    memcpy(raw_data, &m_num_pages, sizeof(uint32));
    uint32 data_size = sizeof(uint32);

    DPRINTF(RubyCacheTrace, "collating %d pages\n", m_num_pages);

    for (uint32 page = 0; page < m_num_pages; ++page) {
        if (m_pages[page] == 0) {
            // Zero page: just the flag byte.
            raw_data[data_size++] = 0;
        } else {
            // Non-zero page: flag byte followed by the page data.
            raw_data[data_size] = 1;
            memcpy(raw_data + data_size + 1, m_pages[page], PAGE_SIZE);
            data_size += PAGE_SIZE + 1;
        }
    }

    return data_size;
}
|
||||
|
||||
/*!
|
||||
* Function for populating the pages of the memory using the available raw
|
||||
* data. Each page has a byte associate with it, which represents whether the
|
||||
* page was NULL or not, when all the pages were collated. The function assumes
|
||||
* that the number of pages in the memory are same as those that were recorded
|
||||
* in the checkpoint.
|
||||
*/
|
||||
inline void
|
||||
MemoryVector::populatePages(uint8* raw_data)
|
||||
{
|
||||
uint32 data_size = 0;
|
||||
uint32 num_pages = 0;
|
||||
|
||||
/* Read the number of pages that were stored. */
|
||||
memcpy(&num_pages, raw_data, sizeof(uint32));
|
||||
data_size = sizeof(uint32);
|
||||
assert(num_pages == m_num_pages);
|
||||
|
||||
DPRINTF(RubyCacheTrace, "Populating %d pages\n", num_pages);
|
||||
|
||||
for (uint32 i = 0;i < m_num_pages; ++i)
|
||||
{
|
||||
assert(m_pages[i] == 0);
|
||||
if (raw_data[data_size] != 0) {
|
||||
m_pages[i] = new uint8[PAGE_SIZE];
|
||||
memcpy(m_pages[i], raw_data + data_size + 1, PAGE_SIZE);
|
||||
data_size += PAGE_SIZE;
|
||||
}
|
||||
data_size += 1;
|
||||
}
|
||||
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__
|
||||
200
simulators/gem5/src/mem/ruby/system/PerfectCacheMemory.hh
Normal file
200
simulators/gem5/src/mem/ruby/system/PerfectCacheMemory.hh
Normal file
@ -0,0 +1,200 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
|
||||
#define __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
|
||||
|
||||
#include "base/hashmap.hh"
|
||||
#include "mem/protocol/AccessPermission.hh"
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
|
||||
template<class ENTRY>
|
||||
struct PerfectCacheLineState
|
||||
{
|
||||
PerfectCacheLineState() { m_permission = AccessPermission_NUM; }
|
||||
AccessPermission m_permission;
|
||||
ENTRY m_entry;
|
||||
};
|
||||
|
||||
template<class ENTRY>
inline std::ostream&
operator<<(std::ostream& out, const PerfectCacheLineState<ENTRY>& obj)
{
    // Intentionally prints nothing; provided only so containers of line
    // states can be streamed.
    return out;
}
|
||||
|
||||
template<class ENTRY>
class PerfectCacheMemory
{
  public:
    PerfectCacheMemory();

    static void printConfig(std::ostream& out);

    // tests to see if an address is present in the cache
    bool isTagPresent(const Address& address) const;

    // Returns true if there is:
    //   a) a tag match on this address or there is
    //   b) an Invalid line in the same cache "way"
    bool cacheAvail(const Address& address) const;

    // find an Invalid entry and sets the tag appropriate for the address
    void allocate(const Address& address);

    void deallocate(const Address& address);

    // Returns with the physical address of the conflicting cache line
    Address cacheProbe(const Address& newAddress) const;

    // looks an address up in the cache
    ENTRY& lookup(const Address& address);
    const ENTRY& lookup(const Address& address) const;

    // Get/Set permission of cache block
    AccessPermission getPermission(const Address& address) const;
    void changePermission(const Address& address, AccessPermission new_perm);

    // Print cache contents
    void print(std::ostream& out) const;

  private:
    // Private copy constructor and assignment operator
    PerfectCacheMemory(const PerfectCacheMemory& obj);
    PerfectCacheMemory& operator=(const PerfectCacheMemory& obj);

    // Data Members (m_prefix)
    // Keyed by line address; a missing key means the line is not cached.
    m5::hash_map<Address, PerfectCacheLineState<ENTRY> > m_map;
};
|
||||
|
||||
template<class ENTRY>
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& out, const PerfectCacheMemory<ENTRY>& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
out << std::flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
template<class ENTRY>
inline
PerfectCacheMemory<ENTRY>::PerfectCacheMemory()
{
    // Nothing to do: the backing map starts out empty.
}
|
||||
|
||||
template<class ENTRY>
inline void
PerfectCacheMemory<ENTRY>::printConfig(std::ostream& out)
{
    // A perfect cache has no configuration parameters to report.
}
|
||||
|
||||
// tests to see if an address is present in the cache
|
||||
template<class ENTRY>
|
||||
inline bool
|
||||
PerfectCacheMemory<ENTRY>::isTagPresent(const Address& address) const
|
||||
{
|
||||
return m_map.count(line_address(address)) > 0;
|
||||
}
|
||||
|
||||
template<class ENTRY>
inline bool
PerfectCacheMemory<ENTRY>::cacheAvail(const Address& address) const
{
    // A perfect cache never has a capacity or conflict miss.
    return true;
}
|
||||
|
||||
// find an Invalid or already allocated entry and sets the tag
|
||||
// appropriate for the address
|
||||
template<class ENTRY>
|
||||
inline void
|
||||
PerfectCacheMemory<ENTRY>::allocate(const Address& address)
|
||||
{
|
||||
PerfectCacheLineState<ENTRY> line_state;
|
||||
line_state.m_permission = AccessPermission_Invalid;
|
||||
line_state.m_entry = ENTRY();
|
||||
m_map[line_address(address)] = line_state;
|
||||
}
|
||||
|
||||
// deallocate entry
|
||||
template<class ENTRY>
inline void
PerfectCacheMemory<ENTRY>::deallocate(const Address& address)
{
    // Erase by line address; erasing an absent line is a harmless no-op.
    m_map.erase(line_address(address));
}
|
||||
|
||||
// Returns with the physical address of the conflicting cache line
|
||||
template<class ENTRY>
inline Address
PerfectCacheMemory<ENTRY>::cacheProbe(const Address& newAddress) const
{
    // Should never be reached: a perfect cache has no victim to evict.
    panic("cacheProbe called in perfect cache");
    return newAddress;
}
|
||||
|
||||
// looks an address up in the cache
|
||||
template<class ENTRY>
inline ENTRY&
PerfectCacheMemory<ENTRY>::lookup(const Address& address)
{
    // NOTE(review): operator[] default-inserts a line state when the
    // address is absent -- callers presumably rely on isTagPresent()
    // first; confirm before tightening this to find()+assert.
    return m_map[line_address(address)].m_entry;
}
|
||||
|
||||
// looks an address up in the cache
|
||||
template<class ENTRY>
|
||||
inline const ENTRY&
|
||||
PerfectCacheMemory<ENTRY>::lookup(const Address& address) const
|
||||
{
|
||||
return m_map[line_address(address)].m_entry;
|
||||
}
|
||||
|
||||
template<class ENTRY>
|
||||
inline AccessPermission
|
||||
PerfectCacheMemory<ENTRY>::getPermission(const Address& address) const
|
||||
{
|
||||
return m_map[line_address(address)].m_permission;
|
||||
}
|
||||
|
||||
template<class ENTRY>
|
||||
inline void
|
||||
PerfectCacheMemory<ENTRY>::changePermission(const Address& address,
|
||||
AccessPermission new_perm)
|
||||
{
|
||||
Address line_address = address;
|
||||
line_address.makeLineAddress();
|
||||
PerfectCacheLineState<ENTRY>& line_state = m_map[line_address];
|
||||
line_state.m_permission = new_perm;
|
||||
}
|
||||
|
||||
template<class ENTRY>
inline void
PerfectCacheMemory<ENTRY>::print(std::ostream& out) const
{
    // Intentionally empty: cache contents are not printed.
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
|
||||
219
simulators/gem5/src/mem/ruby/system/PersistentTable.cc
Normal file
219
simulators/gem5/src/mem/ruby/system/PersistentTable.cc
Normal file
@ -0,0 +1,219 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "mem/ruby/system/PersistentTable.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
// randomize so that handoffs are not locality-aware
|
||||
#if 0
|
||||
int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6,
|
||||
10, 14, 3, 7, 11, 15};
|
||||
int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
||||
10, 11, 12, 13, 14, 15};
|
||||
#endif
|
||||
|
||||
PersistentTable::PersistentTable()
{
    // The table starts empty; entries are created on demand by
    // persistentRequestLock().
}

PersistentTable::~PersistentTable()
{
}
|
||||
|
||||
void
PersistentTable::persistentRequestLock(const Address& address,
                                       MachineID locker,
                                       AccessType type)
{
#if 0
    // Disabled randomized-handoff variant (see persistent_randomize
    // at the top of this file).
    if (locker == m_chip_ptr->getID())
        cout << "Chip " << m_chip_ptr->getID() << ": " << llocker
             << " requesting lock for " << address << endl;

    MachineID locker = (MachineID) persistent_randomize[llocker];
#endif

    assert(address == line_address(address));

    // Insert a default entry if this line has none yet; r.second is
    // false when an entry already existed.
    static const PersistentTableEntry dflt;
    pair<AddressMap::iterator, bool> r =
        m_map.insert(AddressMap::value_type(address, dflt));
    bool present = !r.second;
    AddressMap::iterator i = r.first;
    PersistentTableEntry &entry = i->second;

    if (present) {
        // Make sure we're not already in the locked set
        assert(!(entry.m_starving.isElement(locker)));
    }

    // Record the requester; writers additionally join the
    // request-to-write set.
    entry.m_starving.add(locker);
    if (type == AccessType_Write)
        entry.m_request_to_write.add(locker);

    if (present)
        assert(entry.m_marked.isSubset(entry.m_starving));
}
|
||||
|
||||
void
PersistentTable::persistentRequestUnlock(const Address& address,
                                         MachineID unlocker)
{
#if 0
    // Disabled randomized-handoff variant (see persistent_randomize
    // at the top of this file).
    if (unlocker == m_chip_ptr->getID())
        cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker
             << " requesting unlock for " << address << endl;

    MachineID unlocker = (MachineID) persistent_randomize[uunlocker];
#endif

    assert(address == line_address(address));
    assert(m_map.count(address));
    PersistentTableEntry& entry = m_map[address];

    //
    // Make sure we're in the locked set
    //
    assert(entry.m_starving.isElement(unlocker));
    assert(entry.m_marked.isSubset(entry.m_starving));
    // Remove the unlocker from all three sets.
    entry.m_starving.remove(unlocker);
    entry.m_marked.remove(unlocker);
    entry.m_request_to_write.remove(unlocker);
    assert(entry.m_marked.isSubset(entry.m_starving));

    // Deallocate if empty
    if (entry.m_starving.isEmpty()) {
        assert(entry.m_marked.isEmpty());
        m_map.erase(address);
    }
}
|
||||
|
||||
bool
|
||||
PersistentTable::okToIssueStarving(const Address& address,
|
||||
MachineID machId) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
|
||||
AddressMap::const_iterator i = m_map.find(address);
|
||||
if (i == m_map.end()) {
|
||||
// No entry present
|
||||
return true;
|
||||
}
|
||||
|
||||
const PersistentTableEntry &entry = i->second;
|
||||
|
||||
if (entry.m_starving.isElement(machId)) {
|
||||
// We can't issue another lockdown until are previous unlock
|
||||
// has occurred
|
||||
return false;
|
||||
}
|
||||
|
||||
return entry.m_marked.isEmpty();
|
||||
}
|
||||
|
||||
MachineID
PersistentTable::findSmallest(const Address& address) const
{
    assert(address == line_address(address));
    AddressMap::const_iterator it = m_map.find(address);
    assert(it != m_map.end());
    // Return the smallest-id member of the starving set.
    return it->second.m_starving.smallestElement();
}
|
||||
|
||||
AccessType
PersistentTable::typeOfSmallest(const Address& address) const
{
    assert(address == line_address(address));
    AddressMap::const_iterator it = m_map.find(address);
    assert(it != m_map.end());
    const PersistentTableEntry& entry = it->second;
    // Write access iff the smallest starving machine asked to write.
    return entry.m_request_to_write.isElement(
               entry.m_starving.smallestElement())
        ? AccessType_Write
        : AccessType_Read;
}
|
||||
|
||||
void
|
||||
PersistentTable::markEntries(const Address& address)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
AddressMap::iterator i = m_map.find(address);
|
||||
if (i == m_map.end())
|
||||
return;
|
||||
|
||||
PersistentTableEntry& entry = i->second;
|
||||
|
||||
// None should be marked
|
||||
assert(entry.m_marked.isEmpty());
|
||||
|
||||
// Mark all the nodes currently in the table
|
||||
entry.m_marked = entry.m_starving;
|
||||
}
|
||||
|
||||
bool
|
||||
PersistentTable::isLocked(const Address& address) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
|
||||
// If an entry is present, it must be locked
|
||||
return m_map.count(address) > 0;
|
||||
}
|
||||
|
||||
int
|
||||
PersistentTable::countStarvingForAddress(const Address& address) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
AddressMap::const_iterator i = m_map.find(address);
|
||||
if (i == m_map.end())
|
||||
return 0;
|
||||
|
||||
const PersistentTableEntry& entry = i->second;
|
||||
return entry.m_starving.count();
|
||||
}
|
||||
|
||||
int
|
||||
PersistentTable::countReadStarvingForAddress(const Address& address) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
AddressMap::const_iterator i = m_map.find(address);
|
||||
if (i == m_map.end())
|
||||
return 0;
|
||||
|
||||
const PersistentTableEntry& entry = i->second;
|
||||
return entry.m_starving.count() - entry.m_request_to_write.count();
|
||||
}
|
||||
|
||||
void
PersistentTable::print(ostream& out) const
{
    // Intentionally empty: the table currently has no textual dump;
    // the stream operator below still relies on this hook existing.
}
|
||||
|
||||
102
simulators/gem5/src/mem/ruby/system/PersistentTable.hh
Normal file
102
simulators/gem5/src/mem/ruby/system/PersistentTable.hh
Normal file
@ -0,0 +1,102 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__
|
||||
#define __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#include "base/hashmap.hh"
|
||||
#include "mem/protocol/AccessType.hh"
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
#include "mem/ruby/common/NetDest.hh"
|
||||
#include "mem/ruby/system/MachineID.hh"
|
||||
|
||||
// Per-line bookkeeping for persistent (starvation) requests.
class PersistentTableEntry
{
  public:
    PersistentTableEntry() {}
    // Intentionally empty; kept so the stream operator can delegate.
    void print(std::ostream& out) const {}

    // Machines holding an outstanding persistent lock on this line.
    NetDest m_starving;
    // Snapshot of m_starving taken by PersistentTable::markEntries();
    // invariantly a subset of m_starving.
    NetDest m_marked;
    // Machines whose persistent request asked for write access.
    NetDest m_request_to_write;
};
|
||||
|
||||
// Tracks persistent (starvation-avoidance) lock requests per cache
// line, keyed by line address. Used by token-based protocols to decide
// which machine may issue or must defer a lockdown.
class PersistentTable
{
  public:
    // Constructors
    PersistentTable();

    // Destructor
    ~PersistentTable();

    // Public Methods
    // Record a lock request by `locker` for read or write access.
    void persistentRequestLock(const Address& address, MachineID locker,
                               AccessType type);
    // Remove `unlocker` from the line's lock sets, deallocating the
    // entry when it becomes empty.
    void persistentRequestUnlock(const Address& address, MachineID unlocker);
    // True when `machID` may issue a new starvation request for the line.
    bool okToIssueStarving(const Address& address, MachineID machID) const;
    // Lowest-numbered machine currently starving for the line.
    MachineID findSmallest(const Address& address) const;
    // Access type (read/write) requested by that smallest machine.
    AccessType typeOfSmallest(const Address& address) const;
    // Snapshot the starving set into the marked set.
    void markEntries(const Address& address);
    // True when the line has any persistent entry.
    bool isLocked(const Address& addr) const;
    int countStarvingForAddress(const Address& addr) const;
    int countReadStarvingForAddress(const Address& addr) const;

    static void printConfig(std::ostream& out) {}

    void print(std::ostream& out) const;

  private:
    // Private copy constructor and assignment operator (non-copyable).
    PersistentTable(const PersistentTable& obj);
    PersistentTable& operator=(const PersistentTable& obj);

    // Data Members (m_prefix)
    typedef m5::hash_map<Address, PersistentTableEntry> AddressMap;
    AddressMap m_map;
};
|
||||
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& out, const PersistentTable& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
out << std::flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& out, const PersistentTableEntry& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
out << std::flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__
|
||||
137
simulators/gem5/src/mem/ruby/system/PseudoLRUPolicy.hh
Normal file
137
simulators/gem5/src/mem/ruby/system/PseudoLRUPolicy.hh
Normal file
@ -0,0 +1,137 @@
|
||||
/*
|
||||
* Copyright (c) 2007 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
|
||||
#define __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
|
||||
|
||||
#include "mem/ruby/system/AbstractReplacementPolicy.hh"
|
||||
|
||||
/**
|
||||
* Implementation of tree-based pseudo-LRU replacement
|
||||
*
|
||||
* Works for any associativity between 1 and 128.
|
||||
*
|
||||
* Also implements associativities that are not a power of 2 by
|
||||
* ignoring paths that lead to a larger index (i.e. truncating the
|
||||
* tree). Note that when this occurs, the algorithm becomes less
|
||||
* fair, as it will favor indicies in the larger (by index) half of
|
||||
* the associative set. This is most unfair when the nearest power of
|
||||
* 2 is one below the associativy, and most fair when it is one above.
|
||||
*/
|
||||
|
||||
class PseudoLRUPolicy : public AbstractReplacementPolicy
{
  public:
    PseudoLRUPolicy(Index num_sets, Index assoc);
    ~PseudoLRUPolicy();

    // Update the set's tree so `way` becomes most-recently used.
    void touch(Index set, Index way, Time time);
    // Walk the tree to the pseudo-least-recently-used way of `set`.
    Index getVictim(Index set) const;

  private:
    unsigned int m_effective_assoc;    /** nearest (to ceiling) power of 2 */
    unsigned int m_num_levels;         /** number of levels in the tree */
    uint64* m_trees;                   /** bit representation of the
                                        * trees, one for each set */
};
|
||||
|
||||
inline
|
||||
PseudoLRUPolicy::PseudoLRUPolicy(Index num_sets, Index assoc)
|
||||
: AbstractReplacementPolicy(num_sets, assoc)
|
||||
{
|
||||
// associativity cannot exceed capacity of tree representation
|
||||
assert(num_sets > 0 && assoc > 1 && assoc <= (Index) sizeof(uint64)*4);
|
||||
|
||||
m_trees = NULL;
|
||||
m_num_levels = 0;
|
||||
|
||||
m_effective_assoc = 1;
|
||||
while (m_effective_assoc < assoc) {
|
||||
// effective associativity is ceiling power of 2
|
||||
m_effective_assoc <<= 1;
|
||||
}
|
||||
assoc = m_effective_assoc;
|
||||
while (true) {
|
||||
assoc /= 2;
|
||||
if(!assoc) break;
|
||||
m_num_levels++;
|
||||
}
|
||||
assert(m_num_levels < sizeof(unsigned int)*4);
|
||||
m_trees = new uint64[m_num_sets];
|
||||
for (unsigned i = 0; i < m_num_sets; i++) {
|
||||
m_trees[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
inline
|
||||
PseudoLRUPolicy::~PseudoLRUPolicy()
|
||||
{
|
||||
if (m_trees != NULL)
|
||||
delete[] m_trees;
|
||||
}
|
||||
|
||||
inline void
|
||||
PseudoLRUPolicy::touch(Index set, Index index, Time time)
|
||||
{
|
||||
assert(index >= 0 && index < m_assoc);
|
||||
assert(set >= 0 && set < m_num_sets);
|
||||
|
||||
int tree_index = 0;
|
||||
int node_val;
|
||||
for (int i = m_num_levels - 1; i >= 0; i--) {
|
||||
node_val = (index >> i)&1;
|
||||
if (node_val)
|
||||
m_trees[set] |= node_val << tree_index;
|
||||
else
|
||||
m_trees[set] &= ~(1 << tree_index);
|
||||
tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1;
|
||||
}
|
||||
m_last_ref_ptr[set][index] = time;
|
||||
}
|
||||
|
||||
inline Index
PseudoLRUPolicy::getVictim(Index set) const
{
    // Follow the pseudo-LRU pointers from the root down to a leaf,
    // accumulating the victim way index level by level.
    // assert(m_assoc != 0);
    Index victim = 0;
    int tree_index = 0;
    for (unsigned level = 0; level < m_num_levels; level++) {
        int bit = (m_trees[set] >> tree_index) & 1;
        if (!bit)
            victim += m_effective_assoc >> (level + 1);
        tree_index = bit ? (tree_index * 2) + 1 : (tree_index * 2) + 2;
    }
    assert(victim >= 0 && victim < m_effective_assoc);

    /* return either the found index or the max possible index */
    /* NOTE: this is not a fair replacement when assoc is not a power of 2 */
    return (victim > (m_assoc - 1)) ? m_assoc - 1 : victim;
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
|
||||
704
simulators/gem5/src/mem/ruby/system/RubyPort.cc
Normal file
704
simulators/gem5/src/mem/ruby/system/RubyPort.cc
Normal file
@ -0,0 +1,704 @@
|
||||
/*
|
||||
* Copyright (c) 2012 ARM Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Copyright (c) 2009 Advanced Micro Devices, Inc.
|
||||
* Copyright (c) 2011 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "cpu/testers/rubytest/RubyTester.hh"
|
||||
#include "debug/Config.hh"
|
||||
#include "debug/Ruby.hh"
|
||||
#include "mem/protocol/AccessPermission.hh"
|
||||
#include "mem/ruby/slicc_interface/AbstractController.hh"
|
||||
#include "mem/ruby/system/RubyPort.hh"
|
||||
#include "sim/system.hh"
|
||||
|
||||
// Build a RubyPort: wire up the dedicated pio master port and create
// one M5 slave port / pio master port per connected peer, as reported
// by the Python-side connection counts.
RubyPort::RubyPort(const Params *p)
    : MemObject(p), m_version(p->version), m_controller(NULL),
      m_mandatory_q_ptr(NULL),
      pio_port(csprintf("%s-pio-port", name()), this),
      m_usingRubyTester(p->using_ruby_tester), m_request_cnt(0),
      drainEvent(NULL), ruby_system(p->ruby_system), system(p->system),
      waitingOnSequencer(false), access_phys_mem(p->access_phys_mem)
{
    // A real sequencer version must have been assigned by config.
    assert(m_version != -1);

    // create the slave ports based on the number of connected ports
    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
        slave_ports.push_back(new M5Port(csprintf("%s-slave%d", name(), i),
                                         this, ruby_system, access_phys_mem));
    }

    // create the master ports based on the number of connected ports
    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
        master_ports.push_back(new PioPort(csprintf("%s-master%d", name(), i),
                                           this));
    }
}
|
||||
|
||||
void
RubyPort::init()
{
    // The owning controller must have been attached before init();
    // cache its mandatory queue, through which requests are issued.
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}
|
||||
|
||||
MasterPort &
RubyPort::getMasterPort(const std::string &if_name, int idx)
{
    // The dedicated pio port is resolved by name first.
    if (if_name == "pio_port")
        return pio_port;

    // "master" ports are used by the x86 CPUs to connect the interrupt
    // PIO and interrupt slave port; anything else is delegated upward.
    if (if_name != "master")
        return MemObject::getMasterPort(if_name, idx);

    if (idx >= static_cast<int>(master_ports.size()))
        panic("RubyPort::getMasterPort: unknown index %d\n", idx);

    return *master_ports[idx];
}
|
||||
|
||||
SlavePort &
RubyPort::getSlavePort(const std::string &if_name, int idx)
{
    // "slave" ports connect the CPU caches to the interconnect (and the
    // interrupt master in the x86 case); anything else is delegated
    // upward to MemObject.
    if (if_name != "slave")
        return MemObject::getSlavePort(if_name, idx);

    if (idx >= static_cast<int>(slave_ports.size()))
        panic("RubyPort::getSlavePort: unknown index %d\n", idx);

    return *slave_ports[idx];
}
|
||||
|
||||
RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, queue), queue(*_port, *this),
      ruby_port(_port)
{
    // NOTE(review): `queue` is handed to the base class before its own
    // initializer runs; this relies on QueuedMasterPort merely storing
    // the reference -- confirm against its implementation.
    DPRINTF(RubyPort, "creating master port on ruby sequencer %s\n", _name);
}
|
||||
|
||||
RubyPort::M5Port::M5Port(const std::string &_name, RubyPort *_port,
                         RubySystem *_system, bool _access_phys_mem)
    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this),
      ruby_port(_port), ruby_system(_system),
      _onRetryList(false), access_phys_mem(_access_phys_mem)
{
    // NOTE(review): `queue` is handed to the base class before its own
    // initializer runs; this relies on QueuedSlavePort merely storing
    // the reference -- confirm against its implementation.
    DPRINTF(RubyPort, "creating slave port on ruby sequencer %s\n", _name);
}
|
||||
|
||||
Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    // Ruby models timing only; atomic-mode accesses are unsupported.
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}
|
||||
|
||||
|
||||
bool
|
||||
RubyPort::PioPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
// In FS mode, ruby memory will receive pio responses from devices
|
||||
// and it must forward these responses back to the particular CPU.
|
||||
DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr());
|
||||
|
||||
// First we must retrieve the request port from the sender State
|
||||
RubyPort::SenderState *senderState =
|
||||
safe_cast<RubyPort::SenderState *>(pkt->senderState);
|
||||
M5Port *port = senderState->port;
|
||||
assert(port != NULL);
|
||||
|
||||
// pop the sender state from the packet
|
||||
pkt->senderState = senderState->saved;
|
||||
delete senderState;
|
||||
|
||||
port->sendTimingResp(pkt);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
RubyPort::M5Port::recvTimingReq(PacketPtr pkt)
|
||||
{
|
||||
DPRINTF(RubyPort,
|
||||
"Timing access caught for address %#x\n", pkt->getAddr());
|
||||
|
||||
//dsm: based on SimpleTimingPort::recvTimingReq(pkt);
|
||||
|
||||
// The received packets should only be M5 requests, which should never
|
||||
// get nacked. There used to be code to hanldle nacks here, but
|
||||
// I'm pretty sure it didn't work correctly with the drain code,
|
||||
// so that would need to be fixed if we ever added it back.
|
||||
|
||||
if (pkt->memInhibitAsserted()) {
|
||||
warn("memInhibitAsserted???");
|
||||
// snooper will supply based on copy of packet
|
||||
// still target's responsibility to delete packet
|
||||
delete pkt;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Save the port in the sender state object to be used later to
|
||||
// route the response
|
||||
pkt->senderState = new SenderState(this, pkt->senderState);
|
||||
|
||||
// Check for pio requests and directly send them to the dedicated
|
||||
// pio port.
|
||||
if (!isPhysMemAddress(pkt->getAddr())) {
|
||||
assert(ruby_port->pio_port.isConnected());
|
||||
DPRINTF(RubyPort,
|
||||
"Request for address 0x%#x is assumed to be a pio request\n",
|
||||
pkt->getAddr());
|
||||
|
||||
return ruby_port->pio_port.sendNextCycle(pkt);
|
||||
}
|
||||
|
||||
assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
|
||||
RubySystem::getBlockSizeBytes());
|
||||
|
||||
// Submit the ruby request
|
||||
RequestStatus requestStatus = ruby_port->makeRequest(pkt);
|
||||
|
||||
// If the request successfully issued then we should return true.
|
||||
// Otherwise, we need to delete the senderStatus we just created and return
|
||||
// false.
|
||||
if (requestStatus == RequestStatus_Issued) {
|
||||
DPRINTF(RubyPort, "Request %#x issued\n", pkt->getAddr());
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Unless one is using the ruby tester, record the stalled M5 port for
|
||||
// later retry when the sequencer becomes free.
|
||||
//
|
||||
if (!ruby_port->m_usingRubyTester) {
|
||||
ruby_port->addToRetryList(this);
|
||||
}
|
||||
|
||||
DPRINTF(RubyPort,
|
||||
"Request for address %#x did not issue because %s\n",
|
||||
pkt->getAddr(), RequestStatus_to_string(requestStatus));
|
||||
|
||||
SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
|
||||
pkt->senderState = senderState->saved;
|
||||
delete senderState;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
RubyPort::M5Port::doFunctionalRead(PacketPtr pkt)
|
||||
{
|
||||
Address address(pkt->getAddr());
|
||||
Address line_address(address);
|
||||
line_address.makeLineAddress();
|
||||
|
||||
AccessPermission access_perm = AccessPermission_NotPresent;
|
||||
int num_controllers = ruby_system->m_abs_cntrl_vec.size();
|
||||
|
||||
DPRINTF(RubyPort, "Functional Read request for %s\n",address);
|
||||
|
||||
unsigned int num_ro = 0;
|
||||
unsigned int num_rw = 0;
|
||||
unsigned int num_busy = 0;
|
||||
unsigned int num_backing_store = 0;
|
||||
unsigned int num_invalid = 0;
|
||||
|
||||
// In this loop we count the number of controllers that have the given
|
||||
// address in read only, read write and busy states.
|
||||
for (int i = 0; i < num_controllers; ++i) {
|
||||
access_perm = ruby_system->m_abs_cntrl_vec[i]->
|
||||
getAccessPermission(line_address);
|
||||
if (access_perm == AccessPermission_Read_Only)
|
||||
num_ro++;
|
||||
else if (access_perm == AccessPermission_Read_Write)
|
||||
num_rw++;
|
||||
else if (access_perm == AccessPermission_Busy)
|
||||
num_busy++;
|
||||
else if (access_perm == AccessPermission_Backing_Store)
|
||||
// See RubySlicc_Exports.sm for details, but Backing_Store is meant
|
||||
// to represent blocks in memory *for Broadcast/Snooping protocols*,
|
||||
// where memory has no idea whether it has an exclusive copy of data
|
||||
// or not.
|
||||
num_backing_store++;
|
||||
else if (access_perm == AccessPermission_Invalid ||
|
||||
access_perm == AccessPermission_NotPresent)
|
||||
num_invalid++;
|
||||
}
|
||||
assert(num_rw <= 1);
|
||||
|
||||
uint8* data = pkt->getPtr<uint8_t>(true);
|
||||
unsigned int size_in_bytes = pkt->getSize();
|
||||
unsigned startByte = address.getAddress() - line_address.getAddress();
|
||||
|
||||
// This if case is meant to capture what happens in a Broadcast/Snoop
|
||||
// protocol where the block does not exist in the cache hierarchy. You
|
||||
// only want to read from the Backing_Store memory if there is no copy in
|
||||
// the cache hierarchy, otherwise you want to try to read the RO or RW
|
||||
// copies existing in the cache hierarchy (covered by the else statement).
|
||||
// The reason is because the Backing_Store memory could easily be stale, if
|
||||
// there are copies floating around the cache hierarchy, so you want to read
|
||||
// it only if it's not in the cache hierarchy at all.
|
||||
if (num_invalid == (num_controllers - 1) &&
|
||||
num_backing_store == 1)
|
||||
{
|
||||
DPRINTF(RubyPort, "only copy in Backing_Store memory, read from it\n");
|
||||
for (int i = 0; i < num_controllers; ++i) {
|
||||
access_perm = ruby_system->m_abs_cntrl_vec[i]
|
||||
->getAccessPermission(line_address);
|
||||
if (access_perm == AccessPermission_Backing_Store) {
|
||||
DataBlock& block = ruby_system->m_abs_cntrl_vec[i]
|
||||
->getDataBlock(line_address);
|
||||
|
||||
DPRINTF(RubyPort, "reading from %s block %s\n",
|
||||
ruby_system->m_abs_cntrl_vec[i]->name(), block);
|
||||
for (unsigned i = 0; i < size_in_bytes; ++i) {
|
||||
data[i] = block.getByte(i + startByte);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// In Broadcast/Snoop protocols, this covers if you know the block
|
||||
// exists somewhere in the caching hierarchy, then you want to read any
|
||||
// valid RO or RW block. In directory protocols, same thing, you want
|
||||
// to read any valid readable copy of the block.
|
||||
DPRINTF(RubyPort, "num_busy = %d, num_ro = %d, num_rw = %d\n",
|
||||
num_busy, num_ro, num_rw);
|
||||
// In this loop, we try to figure which controller has a read only or
|
||||
// a read write copy of the given address. Any valid copy would suffice
|
||||
// for a functional read.
|
||||
for(int i = 0;i < num_controllers;++i) {
|
||||
access_perm = ruby_system->m_abs_cntrl_vec[i]
|
||||
->getAccessPermission(line_address);
|
||||
if(access_perm == AccessPermission_Read_Only ||
|
||||
access_perm == AccessPermission_Read_Write)
|
||||
{
|
||||
DataBlock& block = ruby_system->m_abs_cntrl_vec[i]
|
||||
->getDataBlock(line_address);
|
||||
|
||||
DPRINTF(RubyPort, "reading from %s block %s\n",
|
||||
ruby_system->m_abs_cntrl_vec[i]->name(), block);
|
||||
for (unsigned i = 0; i < size_in_bytes; ++i) {
|
||||
data[i] = block.getByte(i + startByte);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
RubyPort::M5Port::doFunctionalWrite(PacketPtr pkt)
|
||||
{
|
||||
Address addr(pkt->getAddr());
|
||||
Address line_addr = line_address(addr);
|
||||
AccessPermission access_perm = AccessPermission_NotPresent;
|
||||
int num_controllers = ruby_system->m_abs_cntrl_vec.size();
|
||||
|
||||
DPRINTF(RubyPort, "Functional Write request for %s\n",addr);
|
||||
|
||||
unsigned int num_ro = 0;
|
||||
unsigned int num_rw = 0;
|
||||
unsigned int num_busy = 0;
|
||||
unsigned int num_backing_store = 0;
|
||||
unsigned int num_invalid = 0;
|
||||
|
||||
// In this loop we count the number of controllers that have the given
|
||||
// address in read only, read write and busy states.
|
||||
for(int i = 0;i < num_controllers;++i) {
|
||||
access_perm = ruby_system->m_abs_cntrl_vec[i]->
|
||||
getAccessPermission(line_addr);
|
||||
if (access_perm == AccessPermission_Read_Only)
|
||||
num_ro++;
|
||||
else if (access_perm == AccessPermission_Read_Write)
|
||||
num_rw++;
|
||||
else if (access_perm == AccessPermission_Busy)
|
||||
num_busy++;
|
||||
else if (access_perm == AccessPermission_Backing_Store)
|
||||
// See RubySlicc_Exports.sm for details, but Backing_Store is meant
|
||||
// to represent blocks in memory *for Broadcast/Snooping protocols*,
|
||||
// where memory has no idea whether it has an exclusive copy of data
|
||||
// or not.
|
||||
num_backing_store++;
|
||||
else if (access_perm == AccessPermission_Invalid ||
|
||||
access_perm == AccessPermission_NotPresent)
|
||||
num_invalid++;
|
||||
}
|
||||
|
||||
// If the number of read write copies is more than 1, then there is bug in
|
||||
// coherence protocol. Otherwise, if all copies are in stable states, i.e.
|
||||
// num_busy == 0, we update all the copies. If there is at least one copy
|
||||
// in busy state, then we check if there is read write copy. If yes, then
|
||||
// also we let the access go through. Or, if there is no copy in the cache
|
||||
// hierarchy at all, we still want to do the write to the memory
|
||||
// (Backing_Store) instead of failing.
|
||||
|
||||
DPRINTF(RubyPort, "num_busy = %d, num_ro = %d, num_rw = %d\n",
|
||||
num_busy, num_ro, num_rw);
|
||||
assert(num_rw <= 1);
|
||||
|
||||
uint8* data = pkt->getPtr<uint8_t>(true);
|
||||
unsigned int size_in_bytes = pkt->getSize();
|
||||
unsigned startByte = addr.getAddress() - line_addr.getAddress();
|
||||
|
||||
if ((num_busy == 0 && num_ro > 0) || num_rw == 1 ||
|
||||
(num_invalid == (num_controllers - 1) && num_backing_store == 1))
|
||||
{
|
||||
for(int i = 0; i < num_controllers;++i) {
|
||||
access_perm = ruby_system->m_abs_cntrl_vec[i]->
|
||||
getAccessPermission(line_addr);
|
||||
if(access_perm == AccessPermission_Read_Only ||
|
||||
access_perm == AccessPermission_Read_Write||
|
||||
access_perm == AccessPermission_Maybe_Stale ||
|
||||
access_perm == AccessPermission_Backing_Store)
|
||||
{
|
||||
DataBlock& block = ruby_system->m_abs_cntrl_vec[i]
|
||||
->getDataBlock(line_addr);
|
||||
|
||||
DPRINTF(RubyPort, "%s\n",block);
|
||||
for (unsigned i = 0; i < size_in_bytes; ++i) {
|
||||
block.setByte(i + startByte, data[i]);
|
||||
}
|
||||
DPRINTF(RubyPort, "%s\n",block);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
RubyPort::M5Port::recvFunctional(PacketPtr pkt)
|
||||
{
|
||||
DPRINTF(RubyPort, "Functional access caught for address %#x\n",
|
||||
pkt->getAddr());
|
||||
|
||||
// Check for pio requests and directly send them to the dedicated
|
||||
// pio port.
|
||||
if (!isPhysMemAddress(pkt->getAddr())) {
|
||||
assert(ruby_port->pio_port.isConnected());
|
||||
DPRINTF(RubyPort, "Request for address 0x%#x is a pio request\n",
|
||||
pkt->getAddr());
|
||||
panic("RubyPort::PioPort::recvFunctional() not implemented!\n");
|
||||
}
|
||||
|
||||
assert(pkt->getAddr() + pkt->getSize() <=
|
||||
line_address(Address(pkt->getAddr())).getAddress() +
|
||||
RubySystem::getBlockSizeBytes());
|
||||
|
||||
bool accessSucceeded = false;
|
||||
bool needsResponse = pkt->needsResponse();
|
||||
|
||||
// Do the functional access on ruby memory
|
||||
if (pkt->isRead()) {
|
||||
accessSucceeded = doFunctionalRead(pkt);
|
||||
} else if (pkt->isWrite()) {
|
||||
accessSucceeded = doFunctionalWrite(pkt);
|
||||
} else {
|
||||
panic("RubyPort: unsupported functional command %s\n",
|
||||
pkt->cmdString());
|
||||
}
|
||||
|
||||
// Unless the requester explicitly said otherwise, generate an error if
|
||||
// the functional request failed
|
||||
if (!accessSucceeded && !pkt->suppressFuncError()) {
|
||||
fatal("Ruby functional %s failed for address %#x\n",
|
||||
pkt->isWrite() ? "write" : "read", pkt->getAddr());
|
||||
}
|
||||
|
||||
if (access_phys_mem) {
|
||||
// The attached physmem contains the official version of data.
|
||||
// The following command performs the real functional access.
|
||||
// This line should be removed once Ruby supplies the official version
|
||||
// of data.
|
||||
ruby_port->system->getPhysMem().functionalAccess(pkt);
|
||||
}
|
||||
|
||||
// turn packet around to go back to requester if response expected
|
||||
if (needsResponse) {
|
||||
pkt->setFunctionalResponseStatus(accessSucceeded);
|
||||
|
||||
// @todo There should not be a reverse call since the response is
|
||||
// communicated through the packet pointer
|
||||
// DPRINTF(RubyPort, "Sending packet back over port\n");
|
||||
// sendFunctional(pkt);
|
||||
}
|
||||
DPRINTF(RubyPort, "Functional access %s!\n",
|
||||
accessSucceeded ? "successful":"failed");
|
||||
}
|
||||
|
||||
void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    // Invoked by the sequencer when a request completes: route the
    // response to the originating M5Port, then retry any ports that
    // stalled while the sequencer was busy.
    // Retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->hitCallback(pkt);

    //
    // If we had to stall the M5Ports, wake them up because the sequencer
    // likely has free resources now.
    //
    if (waitingOnSequencer) {
        //
        // Record the current list of ports to retry on a temporary list before
        // calling sendRetry on those ports. sendRetry will cause an
        // immediate retry, which may result in the ports being put back on the
        // list. Therefore we want to clear the retryList before calling
        // sendRetry.
        //
        std::list<M5Port*> curRetryList(retryList);

        retryList.clear();
        waitingOnSequencer = false;

        for (std::list<M5Port*>::iterator i = curRetryList.begin();
             i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free. SendRetry to port %s\n",
                    (*i)->name());
            (*i)->onRetryList(false);
            (*i)->sendRetry();
        }
    }

    // A completed request may have been the last thing blocking a drain.
    testDrainComplete();
}
|
||||
|
||||
void
|
||||
RubyPort::testDrainComplete()
|
||||
{
|
||||
//If we weren't able to drain before, we might be able to now.
|
||||
if (drainEvent != NULL) {
|
||||
unsigned int drainCount = getDrainCount(drainEvent);
|
||||
DPRINTF(Config, "Drain count: %u\n", drainCount);
|
||||
if (drainCount == 0) {
|
||||
drainEvent->process();
|
||||
// Clear the drain event once we're done with it.
|
||||
drainEvent = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Count how many events must still complete before this RubyPort (its
// sequencer plus all attached pio/slave/master ports) is fully drained.
// A result of zero means the port is drained.
unsigned int
RubyPort::getDrainCount(Event *de)
{
    int count = 0;
    //
    // If the sequencer is not empty, then requests need to drain.
    // The outstandingCount is the number of requests outstanding and thus the
    // number of times M5's timing port will process the drain event.
    //
    count += outstandingCount();

    DPRINTF(Config, "outstanding count %d\n", outstandingCount());

    // To simplify the draining process, the sequencer's deadlock detection
    // event should have been descheduled.
    assert(isDeadlockEventScheduled() == false);

    if (pio_port.isConnected()) {
        count += pio_port.drain(de);
        DPRINTF(Config, "count after pio check %d\n", count);
    }

    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        count += (*p)->drain(de);
        DPRINTF(Config, "count after slave port check %d\n", count);
    }

    for (std::vector<PioPort*>::iterator p = master_ports.begin();
         p != master_ports.end(); ++p) {
        count += (*p)->drain(de);
        DPRINTF(Config, "count after master port check %d\n", count);
    }

    DPRINTF(Config, "final count %d\n", count);

    return count;
}
|
||||
|
||||
unsigned int
|
||||
RubyPort::drain(Event *de)
|
||||
{
|
||||
if (isDeadlockEventScheduled()) {
|
||||
descheduleDeadlockEvent();
|
||||
}
|
||||
|
||||
int count = getDrainCount(de);
|
||||
|
||||
// Set status
|
||||
if (count != 0) {
|
||||
drainEvent = de;
|
||||
|
||||
changeState(SimObject::Draining);
|
||||
return count;
|
||||
}
|
||||
|
||||
changeState(SimObject::Drained);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Turn a completed Ruby request back into an M5 packet response: normalize
// LL/SC and Flush packets, optionally perform the backing physical-memory
// access, and send the response (or free the packet if none is expected).
void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    //
    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    //
    bool accessPhysMem = access_phys_mem;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    //
    // Flush requests don't access physical memory
    //
    if (pkt->isFlush()) {
        accessPhysMem = false;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        // The backing store holds the official data; this performs the
        // actual read/write and also turns the packet into a response.
        ruby_port->system->getPhysMem().access(pkt);
    } else if (needsResponse) {
        // No memory access happened, so convert the packet ourselves.
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        sendNextCycle(pkt);
    } else {
        // No one is waiting for this packet; this port owns it now.
        delete pkt;
    }

    DPRINTF(RubyPort, "Hit callback done!\n");
}
|
||||
|
||||
bool
|
||||
RubyPort::M5Port::sendNextCycle(PacketPtr pkt, bool send_as_snoop)
|
||||
{
|
||||
//minimum latency, must be > 0
|
||||
queue.schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()),
|
||||
send_as_snoop);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
RubyPort::PioPort::sendNextCycle(PacketPtr pkt)
|
||||
{
|
||||
//minimum latency, must be > 0
|
||||
queue.schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
|
||||
return true;
|
||||
}
|
||||
|
||||
// Advertise the address ranges this slave port responds to.
AddrRangeList
RubyPort::M5Port::getAddrRanges()
{
    // At the moment the assumption is that the master does not care, so
    // report an empty range list.
    return AddrRangeList();
}
|
||||
|
||||
// True if the address falls within the system's physical memory, i.e. it is
// backed by memory rather than a device.
bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    return ruby_port->system->isMemAddr(addr);
}
|
||||
|
||||
unsigned
|
||||
RubyPort::M5Port::deviceBlockSize() const
|
||||
{
|
||||
return (unsigned) RubySystem::getBlockSizeBytes();
|
||||
}
|
||||
|
||||
// Called by Ruby when it evicts a block: sends an invalidation snoop for
// that address to every snooping CPU-side port.
void
RubyPort::ruby_eviction_callback(const Address& address)
{
    DPRINTF(RubyPort, "Sending invalidations.\n");
    // should this really be using funcMasterId?
    Request req(address.getAddress(), 0, 0, Request::funcMasterId);
    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        if ((*p)->getMasterPort().isSnooping()) {
            // NOTE(review): one Packet per snooping port, all referencing the
            // stack-local req above — presumably the snoop completes before
            // this function returns; confirm against the port protocol.
            Packet *pkt = new Packet(&req, MemCmd::InvalidationReq);
            // send as a snoop request
            (*p)->sendTimingSnoopReq(pkt);
        }
    }
}
|
||||
196
simulators/gem5/src/mem/ruby/system/RubyPort.hh
Normal file
196
simulators/gem5/src/mem/ruby/system/RubyPort.hh
Normal file
@ -0,0 +1,196 @@
|
||||
/*
|
||||
* Copyright (c) 2012 ARM Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Copyright (c) 2009 Advanced Micro Devices, Inc.
|
||||
* Copyright (c) 2011 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_RUBYPORT_HH__
|
||||
#define __MEM_RUBY_SYSTEM_RUBYPORT_HH__
|
||||
|
||||
#include <cassert>
|
||||
#include <string>
|
||||
|
||||
#include "mem/protocol/RequestStatus.hh"
|
||||
#include "mem/ruby/system/System.hh"
|
||||
#include "mem/mem_object.hh"
|
||||
#include "mem/physical.hh"
|
||||
#include "mem/tport.hh"
|
||||
#include "params/RubyPort.hh"
|
||||
|
||||
class MessageBuffer;
|
||||
class AbstractController;
|
||||
|
||||
// RubyPort bridges gem5's port-based memory system and the Ruby memory
// model. M5Port slave ports accept requests from CPUs/testers, the PioPort
// forwards uncached (programmed I/O) accesses, and concrete subclasses
// supply makeRequest() and the drain/deadlock hooks.
class RubyPort : public MemObject
{
  public:
    // CPU-facing slave port that injects requests into Ruby and delivers
    // responses back via hitCallback().
    class M5Port : public QueuedSlavePort
    {
      private:

        SlavePacketQueue queue;      // queued outbound responses
        RubyPort *ruby_port;         // back-pointer to the owning RubyPort
        RubySystem* ruby_system;
        bool _onRetryList;           // true while parked on the retry list
        bool access_phys_mem;        // responses also touch M5 physical memory

      public:
        M5Port(const std::string &_name, RubyPort *_port,
               RubySystem*_system, bool _access_phys_mem);
        bool sendNextCycle(PacketPtr pkt, bool send_as_snoop = false);
        void hitCallback(PacketPtr pkt);
        void evictionCallback(const Address& address);
        unsigned deviceBlockSize() const;

        // Accessor/mutator pair for the retry-list bookkeeping flag.
        bool onRetryList()
        { return _onRetryList; }

        void onRetryList(bool newVal)
        { _onRetryList = newVal; }

      protected:
        virtual bool recvTimingReq(PacketPtr pkt);
        virtual Tick recvAtomic(PacketPtr pkt);
        virtual void recvFunctional(PacketPtr pkt);
        virtual AddrRangeList getAddrRanges();

      private:
        bool isPhysMemAddress(Addr addr);
        bool doFunctionalRead(PacketPtr pkt);
        bool doFunctionalWrite(PacketPtr pkt);
    };

    friend class M5Port;

    // Master port used to forward uncached accesses out of Ruby.
    class PioPort : public QueuedMasterPort
    {
      private:

        MasterPacketQueue queue;     // queued outbound requests

        RubyPort *ruby_port;         // back-pointer to the owning RubyPort

      public:
        PioPort(const std::string &_name, RubyPort *_port);
        bool sendNextCycle(PacketPtr pkt);

      protected:
        virtual bool recvTimingResp(PacketPtr pkt);
    };

    friend class PioPort;

    // Pushed onto a packet while it is inside Ruby so the response can be
    // routed back to the originating M5Port; the previous sender state is
    // saved and restored when this one is popped.
    struct SenderState : public Packet::SenderState
    {
        M5Port* port;
        Packet::SenderState *saved;

        SenderState(M5Port* _port, Packet::SenderState *sender_state = NULL)
            : port(_port), saved(sender_state)
        {}
    };

    typedef RubyPortParams Params;
    RubyPort(const Params *p);
    virtual ~RubyPort() {}

    void init();

    MasterPort &getMasterPort(const std::string &if_name, int idx);
    SlavePort &getSlavePort(const std::string &if_name, int idx);

    // Interface every concrete sequencer must implement.
    virtual RequestStatus makeRequest(PacketPtr pkt) = 0;
    virtual int outstandingCount() const = 0;
    virtual bool isDeadlockEventScheduled() const = 0;
    virtual void descheduleDeadlockEvent() = 0;

    //
    // Called by the controller to give the sequencer a pointer.
    // A pointer to the controller is needed for atomic support.
    //
    void setController(AbstractController* _cntrl) { m_controller = _cntrl; }
    int getId() { return m_version; }
    unsigned int drain(Event *de);

  protected:
    const std::string m_name;
    void ruby_hit_callback(PacketPtr pkt);
    void testDrainComplete();
    void ruby_eviction_callback(const Address& address);

    int m_version;
    AbstractController* m_controller;
    MessageBuffer* m_mandatory_q_ptr;
    PioPort pio_port;
    bool m_usingRubyTester;

  private:
    // Park a stalled port; it will be retried by ruby_hit_callback() once
    // the sequencer has free resources again.
    void addToRetryList(M5Port * port)
    {
        if (!port->onRetryList()) {
            port->onRetryList(true);
            retryList.push_back(port);
            waitingOnSequencer = true;
        }
    }

    unsigned int getDrainCount(Event *de);

    uint16_t m_port_id;
    uint64_t m_request_cnt;

    /** Vector of M5 Ports attached to this Ruby port. */
    typedef std::vector<M5Port*>::iterator CpuPortIter;
    std::vector<M5Port*> slave_ports;
    std::vector<PioPort*> master_ports;

    // Event to signal when an in-progress drain completes; NULL when no
    // drain is pending.
    Event *drainEvent;

    RubySystem* ruby_system;
    System* system;

    //
    // Based on similar code in the M5 bus. Stores pointers to those ports
    // that should be called when the Sequencer becomes available after a stall.
    //
    std::list<M5Port*> retryList;

    bool waitingOnSequencer;
    bool access_phys_mem;
};
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_RUBYPORT_HH__
|
||||
70
simulators/gem5/src/mem/ruby/system/RubyPortProxy.cc
Normal file
70
simulators/gem5/src/mem/ruby/system/RubyPortProxy.cc
Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Copyright (c) 2011 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Andreas Hansson
|
||||
*/
|
||||
|
||||
#include "mem/ruby/system/RubyPortProxy.hh"
|
||||
|
||||
// The proxy adds no state of its own; simply forward the parameters to
// the RubyPort base class.
RubyPortProxy::RubyPortProxy(const RubyPortProxyParams* p) :
    RubyPort(p) {
}
|
||||
|
||||
// Nothing to clean up; all state lives in the RubyPort base class.
RubyPortProxy::~RubyPortProxy()
{
}
|
||||
|
||||
void
RubyPortProxy::init()
{
    // Merely override to not care about the m_controller being NULL —
    // the proxy only serves functional accesses and has no controller.
}
|
||||
|
||||
RequestStatus
RubyPortProxy::makeRequest(PacketPtr pkt)
{
    // This sequencer should only be used through the functional
    // accesses made by the system port and so simply fail if this
    // happens.
    panic("RubyPortProxy::makeRequest should not be called");
    // Unreachable; keeps the compiler's missing-return check happy.
    return RequestStatus_NULL;
}
|
||||
|
||||
// Factory hook invoked by the generated Params class to construct the
// SimObject from its Python configuration.
RubyPortProxy*
RubyPortProxyParams::create()
{
    return new RubyPortProxy(this);
}
|
||||
114
simulators/gem5/src/mem/ruby/system/RubyPortProxy.hh
Normal file
114
simulators/gem5/src/mem/ruby/system/RubyPortProxy.hh
Normal file
@ -0,0 +1,114 @@
|
||||
/*
|
||||
* Copyright (c) 2011 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Andreas Hansson
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
 * RubyPortProxy for connecting system port to Ruby
|
||||
*
|
||||
* A trivial wrapper that allows the system port to connect to Ruby
|
||||
* and use nothing but functional accesses.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_RUBYPORTPROXY_HH__
|
||||
#define __MEM_RUBY_SYSTEM_RUBYPORTPROXY_HH__
|
||||
|
||||
#include "mem/ruby/system/RubyPort.hh"
|
||||
#include "params/RubyPortProxy.hh"
|
||||
|
||||
class RubyPortProxy : public RubyPort
|
||||
{
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Create a new RubyPortProxy.
|
||||
*
|
||||
* @param p Parameters inherited from the RubyPort
|
||||
*/
|
||||
RubyPortProxy(const RubyPortProxyParams* p);
|
||||
|
||||
/**
|
||||
* Destruct a RubyPortProxy.
|
||||
*/
|
||||
virtual ~RubyPortProxy();
|
||||
|
||||
/**
|
||||
* Initialise a RubyPortProxy by doing nothing and avoid
|
||||
* involving the super class.
|
||||
*/
|
||||
void init();
|
||||
|
||||
/**
|
||||
* Pure virtual member in the super class that we are forced to
|
||||
* implement even if it is never used (since there are only
|
||||
* functional accesses).
|
||||
*
|
||||
* @param pkt The packet to serve to Ruby
|
||||
* @returns always a NULL status
|
||||
*/
|
||||
RequestStatus makeRequest(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Pure virtual member in the super class that we are forced to
|
||||
* implement even if it is never used (since there are only
|
||||
* functional accesses).
|
||||
*
|
||||
* @returns always 0
|
||||
*/
|
||||
int outstandingCount() const { return 0; }
|
||||
|
||||
/**
|
||||
* Pure virtual member in the super class that we are forced to
|
||||
* implement even if it is never used (since there are only
|
||||
* functional accesses).
|
||||
*
|
||||
* @returns always false
|
||||
*/
|
||||
bool isDeadlockEventScheduled() const { return false; }
|
||||
|
||||
/**
|
||||
* Pure virtual member in the super class that we are forced to
|
||||
* implement even if it is never used (since there are only
|
||||
* functional accesses).
|
||||
*/
|
||||
void descheduleDeadlockEvent() { }
|
||||
|
||||
};
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_RUBYPORTPROXY_HH__
|
||||
44
simulators/gem5/src/mem/ruby/system/RubySystem.py
Normal file
44
simulators/gem5/src/mem/ruby/system/RubySystem.py
Normal file
@ -0,0 +1,44 @@
|
||||
# Copyright (c) 2009 Advanced Micro Devices, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met: redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer;
|
||||
# redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution;
|
||||
# neither the name of the copyright holders nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Authors: Steve Reinhardt
|
||||
# Brad Beckmann
|
||||
|
||||
from m5.params import *
|
||||
from m5.SimObject import SimObject
|
||||
|
||||
class RubySystem(SimObject):
    """Top-level SimObject describing the Ruby memory system configuration."""
    type = 'RubySystem'
    random_seed = Param.Int(1234, "random seed used by the simulation")
    randomization = Param.Bool(False,
        "insert random delays on message enqueue times")
    clock = Param.Clock('1GHz', "")
    block_size_bytes = Param.Int(64,
        "default cache block size; must be a power of two")
    mem_size = Param.MemorySize("total memory size of the system")
    stats_filename = Param.String("ruby.stats",
        "file to which ruby dumps its stats")
    no_mem_vec = Param.Bool(False, "do not allocate Ruby's mem vector")
|
||||
55
simulators/gem5/src/mem/ruby/system/SConscript
Normal file
55
simulators/gem5/src/mem/ruby/system/SConscript
Normal file
@ -0,0 +1,55 @@
|
||||
# -*- mode:python -*-
|
||||
|
||||
# Copyright (c) 2009 The Hewlett-Packard Development Company
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met: redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer;
|
||||
# redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution;
|
||||
# neither the name of the copyright holders nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Authors: Nathan Binkert
|
||||
|
||||
Import('*')

# Ruby sources are only built when a coherence protocol has been selected.
if env['PROTOCOL'] == 'None':
    Return()

# Python SimObject descriptions for the Ruby system components.
SimObject('Cache.py')
SimObject('Sequencer.py')
SimObject('DirectoryMemory.py')
SimObject('MemoryControl.py')
SimObject('WireBuffer.py')
SimObject('RubySystem.py')

# C++ implementation files.
Source('DMASequencer.cc')
Source('DirectoryMemory.cc')
Source('SparseMemory.cc')
Source('CacheMemory.cc')
Source('MemoryControl.cc')
Source('WireBuffer.cc')
Source('MemoryNode.cc')
Source('PersistentTable.cc')
Source('RubyPort.cc')
Source('RubyPortProxy.cc')
Source('Sequencer.cc')
Source('System.cc')
Source('TimerTable.cc')
737
simulators/gem5/src/mem/ruby/system/Sequencer.cc
Normal file
737
simulators/gem5/src/mem/ruby/system/Sequencer.cc
Normal file
@ -0,0 +1,737 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "base/misc.hh"
|
||||
#include "base/str.hh"
|
||||
#include "config/the_isa.hh"
|
||||
#if THE_ISA == X86_ISA
|
||||
#include "arch/x86/insts/microldstop.hh"
|
||||
#endif // X86_ISA
|
||||
#include "cpu/testers/rubytest/RubyTester.hh"
|
||||
#include "debug/MemoryAccess.hh"
|
||||
#include "debug/ProtocolTrace.hh"
|
||||
#include "debug/RubySequencer.hh"
|
||||
#include "mem/protocol/PrefetchBit.hh"
|
||||
#include "mem/protocol/RubyAccessMode.hh"
|
||||
#include "mem/ruby/buffers/MessageBuffer.hh"
|
||||
#include "mem/ruby/common/Global.hh"
|
||||
#include "mem/ruby/profiler/Profiler.hh"
|
||||
#include "mem/ruby/slicc_interface/RubyRequest.hh"
|
||||
#include "mem/ruby/system/CacheMemory.hh"
|
||||
#include "mem/ruby/system/Sequencer.hh"
|
||||
#include "mem/ruby/system/System.hh"
|
||||
#include "mem/packet.hh"
|
||||
#include "params/RubySequencer.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
// Factory hook invoked by the generated Params class to construct the
// Sequencer from its Python configuration.
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}
|
||||
|
||||
// Construct a Sequencer from its generated parameters, wiring up the
// caches and validating the configuration.
Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    // Stall-cycle statistics, reported by printStats().
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    // A sequencer is unusable without both caches and positive limits.
    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}
|
||||
|
||||
// No explicit cleanup required; members release their own resources.
Sequencer::~Sequencer()
{
}
|
||||
|
||||
// Periodic deadlock check: panics if any outstanding read or write request
// has been waiting at least m_deadlock_threshold, and reschedules itself
// while requests remain outstanding.
void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        // Requests younger than the threshold are fine; skip them.
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    // Sanity: the cached count must agree with the table sizes.
    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }
}
|
||||
|
||||
void
|
||||
Sequencer::printStats(ostream & out) const
|
||||
{
|
||||
out << "Sequencer: " << m_name << endl
|
||||
<< " store_waiting_on_load_cycles: "
|
||||
<< m_store_waiting_on_load_cycles << endl
|
||||
<< " store_waiting_on_store_cycles: "
|
||||
<< m_store_waiting_on_store_cycles << endl
|
||||
<< " load_waiting_on_load_cycles: "
|
||||
<< m_load_waiting_on_load_cycles << endl
|
||||
<< " load_waiting_on_store_cycles: "
|
||||
<< m_load_waiting_on_store_cycles << endl;
|
||||
}
|
||||
|
||||
// Progress dump for debugging. Currently a no-op: the entire body is
// compiled out with "#if 0".
//
// NOTE(review): the disabled code below is stale and would NOT compile
// if re-enabled — it references undeclared names (i, rkeys, wkeys,
// m_Read), calls member functions on a pointer with '.' instead of
// '->', and uses m_writeRequestTable.size without parentheses. It is
// kept verbatim here; rewrite it against the current RequestTable
// iterator API before enabling.
void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_Read
        << " version Requests = " << m_readRequestTable.size() << endl;

    // print the request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->type
            << " Address " << rkeys[i]
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size << endl;

    // print the request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request.getType()
            << " Address " << wkeys[i]
            << " Posted " << request.getTime()
            << " PF " << request.getPrefetch() << endl;
        if (request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}
|
||||
|
||||
void
|
||||
Sequencer::printConfig(ostream& out) const
|
||||
{
|
||||
out << "Seqeuncer config: " << m_name << endl
|
||||
<< " controller: " << m_controller->getName() << endl
|
||||
<< " version: " << m_version << endl
|
||||
<< " max_outstanding_requests: " << m_max_outstanding_requests << endl
|
||||
<< " deadlock_threshold: " << m_deadlock_threshold << endl;
|
||||
}
|
||||
|
||||
// Insert the request on the correct request table (write table for
// store-like and atomic types, read table otherwise), keyed by cache
// line address.
//
// NOTE(review): despite the original comment ("Return true if the
// entry was already present"), this function returns a RequestStatus:
// RequestStatus_Ready when the request was inserted, or
// RequestStatus_Aliased when an outstanding request to the same line
// forces the caller to retry later.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    // Invariant: the cached outstanding count matches the table sizes.
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock()
                 + curTick());
    }

    // Requests are tracked at cache-line granularity.
    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    // Store-like, RMW, LL/SC, and flush requests go to the write table.
    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

        // Insert a placeholder; r.second is true only if the line was
        // not already present in the write table.
        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_eventQueue_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_eventQueue_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    // Record the new outstanding-request level and re-check invariant.
    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}
|
||||
|
||||
// Bookkeeping helper: decrement the outstanding-request count after an
// entry has been erased from one of the request tables, and verify the
// count still matches the combined table sizes. Callers must erase the
// table entry BEFORE calling this, or the assert fires.
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}
|
||||
|
||||
void
|
||||
Sequencer::removeRequest(SequencerRequest* srequest)
|
||||
{
|
||||
assert(m_outstanding_count ==
|
||||
m_writeRequestTable.size() + m_readRequestTable.size());
|
||||
|
||||
Address line_addr(srequest->pkt->getAddr());
|
||||
line_addr.makeLineAddress();
|
||||
if ((srequest->m_type == RubyRequestType_ST) ||
|
||||
(srequest->m_type == RubyRequestType_RMW_Read) ||
|
||||
(srequest->m_type == RubyRequestType_RMW_Write) ||
|
||||
(srequest->m_type == RubyRequestType_Load_Linked) ||
|
||||
(srequest->m_type == RubyRequestType_Store_Conditional) ||
|
||||
(srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
|
||||
(srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
|
||||
m_writeRequestTable.erase(line_addr);
|
||||
} else {
|
||||
m_readRequestTable.erase(line_addr);
|
||||
}
|
||||
|
||||
markRemoved();
|
||||
}
|
||||
|
||||
// Apply Alpha-style load-linked / store-conditional semantics for a
// completed write-table request, using the data cache's per-line lock
// state. Returns false only for an SC that found its line unlocked;
// all other request types return true.
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        // LL locks the line for this sequencer's version (CPU id).
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}
|
||||
|
||||
// Convenience overload: write completion with no machine type
// attribution (GenericMachineType_NULL) and no timing breakdown.
void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}
|
||||
|
||||
// Convenience overload: write completion with machine type but no
// timing breakdown (all three request timestamps default to 0).
void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}
|
||||
|
||||
// Completion path for a write-table request: look up and remove the
// outstanding request for this cache line, apply LL/SC lock semantics,
// manage mandatory-queue blocking for locked RMW sequences, and hand
// the result to hitCallback().
void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    // Callbacks are always line-aligned and must match a pending write.
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    // Remove the entry before hitCallback so the tables stay consistent.
    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));


    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    //
    bool success = true;
    if(!m_usingNetworkTester)
        success = handleLlsc(address, request);

    // A locked RMW read blocks the mandatory queue for this line until
    // the matching locked RMW write unblocks it, making the pair atomic.
    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}
|
||||
|
||||
// Convenience overload: read completion with no machine type
// attribution (GenericMachineType_NULL) and no timing breakdown.
void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}
|
||||
|
||||
// Convenience overload: read completion with machine type but no
// timing breakdown (all three request timestamps default to 0).
void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}
|
||||
|
||||
// Completion path for a read-table request: look up and remove the
// outstanding LD/IFETCH request for this cache line, then hand the
// result to hitCallback(). Reads always "succeed" (no SC semantics).
void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    // Callbacks are always line-aligned and must match a pending read.
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    // Remove the entry before hitCallback so the tables stay consistent.
    m_readRequestTable.erase(i);
    markRemoved();

    // Only plain loads and instruction fetches live in the read table.
    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}
|
||||
|
||||
// Final stage for every completed request: update cache replacement
// state, profile miss latency, move data between the Ruby DataBlock
// and the M5 packet, notify the RubyTester if active, delete the
// bookkeeping record, and deliver the packet back to the CPU (or to
// the cache recorder during warmup/cooldown).
void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        // Extra timing breakdown when the L1 responded with coherence
        // communication or the directory responded.
        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                initialRequestTime,
                forwardRequestTime,
                firstResponseTime,
                g_eventQueue_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                initialRequestTime,
                forwardRequestTime,
                firstResponseTime,
                g_eventQueue_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 curTick(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 request_address, miss_latency);
    }

    // update the data
    if (g_system_ptr->m_warmup_enabled) {
        // During warmup the packet carries the data to install in Ruby.
        assert(pkt->getPtr<uint8_t>(false) != NULL);
        data.setData(pkt->getPtr<uint8_t>(false),
                     request_address.getOffset(), pkt->getSize());
    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
        // Read-like types copy Ruby's data into the packet; write-like
        // types copy the packet's data into Ruby's DataBlock.
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        // Packet has no data pointer: nothing to copy either way.
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transfered from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    // The bookkeeping record is no longer needed.
    delete srequest;

    // During warmup/cooldown the packet belongs to the cache recorder,
    // which drives the next fetch/flush; otherwise return it to the CPU.
    if (g_system_ptr->m_warmup_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (g_system_ptr->m_cooldown_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}
|
||||
|
||||
bool
|
||||
Sequencer::empty() const
|
||||
{
|
||||
return m_writeRequestTable.empty() && m_readRequestTable.empty();
|
||||
}
|
||||
|
||||
// Entry point from the CPU port: translate an M5 packet into a pair of
// Ruby request types (primary for table bookkeeping, secondary for the
// protocol message), record it in the request tables, and issue it.
// Returns BufferFull, Aliased (via insertRequest), or Issued.
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    // Back-pressure: refuse new requests past the configured limit.
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
// On x86, a read flagged with StoreCheck (e.g. for CAS-style ops) must
// acquire exclusive permission, so it is treated as a store-class request.
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    // Record the request; Aliased/BufferFull statuses propagate to caller.
    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}
|
||||
|
||||
// Build a RubyRequest message from the packet (address, data pointer,
// PC, context id) and enqueue it on the mandatory queue with the
// appropriate L1 cache latency.
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    // -1 means "no context id available" for this packet.
    int proc_id = -1;
    if (pkt != NULL && pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // The message is owned by the mandatory queue once enqueued.
    RubyRequest *msg = new RubyRequest(pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    Time latency = 0;  // initialized to a null value

    // Instruction fetches pay the I-cache latency; everything else the
    // D-cache latency.
    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}
|
||||
|
||||
template <class KEY, class VALUE>
|
||||
std::ostream &
|
||||
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
|
||||
{
|
||||
typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
|
||||
typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();
|
||||
|
||||
out << "[";
|
||||
for (; i != end; ++i)
|
||||
out << " " << i->first << "=" << i->second;
|
||||
out << " ]";
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
void
|
||||
Sequencer::print(ostream& out) const
|
||||
{
|
||||
out << "[Sequencer: " << m_version
|
||||
<< ", outstanding requests: " << m_outstanding_count
|
||||
<< ", read request table: " << m_readRequestTable
|
||||
<< ", write request table: " << m_writeRequestTable
|
||||
<< "]";
|
||||
}
|
||||
|
||||
// this can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block
//
// No-op unless the build defines CHECK_COHERENCE.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}
|
||||
|
||||
// Notify the attached CPU port that a cache line was evicted, so it
// can invalidate any matching LL/SC reservation (forwards to the
// RubyPort base-class helper).
void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}
|
||||
184
simulators/gem5/src/mem/ruby/system/Sequencer.hh
Normal file
184
simulators/gem5/src/mem/ruby/system/Sequencer.hh
Normal file
@ -0,0 +1,184 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_SEQUENCER_HH__
|
||||
#define __MEM_RUBY_SYSTEM_SEQUENCER_HH__
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#include "base/hashmap.hh"
|
||||
#include "mem/protocol/GenericMachineType.hh"
|
||||
#include "mem/protocol/RubyRequestType.hh"
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
#include "mem/ruby/common/Consumer.hh"
|
||||
#include "mem/ruby/system/RubyPort.hh"
|
||||
|
||||
class DataBlock;
|
||||
class CacheMemory;
|
||||
|
||||
struct RubySequencerParams;
|
||||
|
||||
// Bookkeeping record for one outstanding CPU request: the original M5
// packet, the Ruby request type it was classified as, and the cycle it
// was issued (used for deadlock detection and miss-latency profiling).
struct SequencerRequest
{
    PacketPtr pkt;           // original packet from the CPU port
    RubyRequestType m_type;  // primary request type chosen by makeRequest()
    Time issue_time;         // cycle the request entered the sequencer

    SequencerRequest(PacketPtr _pkt, RubyRequestType _m_type, Time _issue_time)
        : pkt(_pkt), m_type(_m_type), issue_time(_issue_time)
    {}
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& out, const SequencerRequest& obj);
|
||||
|
||||
// The Sequencer is the interface between an M5 CPU port and the Ruby
// memory system: it tracks outstanding requests per cache line,
// enforces aliasing rules, implements LL/SC and locked-RMW semantics,
// detects deadlock, and delivers completion callbacks back to the CPU.
class Sequencer : public RubyPort, public Consumer
{
  public:
    typedef RubySequencerParams Params;
    Sequencer(const Params *);
    ~Sequencer();

    // Public Methods
    void wakeup(); // Used only for deadlock detection

    void printConfig(std::ostream& out) const;

    void printProgress(std::ostream& out) const;

    // Completion callbacks invoked by the cache controller; the
    // overloads differ only in how much machine-type/timing detail the
    // protocol supplies.
    void writeCallback(const Address& address, DataBlock& data);

    void writeCallback(const Address& address,
                       GenericMachineType mach,
                       DataBlock& data);

    void writeCallback(const Address& address,
                       GenericMachineType mach,
                       DataBlock& data,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime);

    void readCallback(const Address& address, DataBlock& data);

    void readCallback(const Address& address,
                      GenericMachineType mach,
                      DataBlock& data);

    void readCallback(const Address& address,
                      GenericMachineType mach,
                      DataBlock& data,
                      Time initialRequestTime,
                      Time forwardRequestTime,
                      Time firstResponseTime);

    // Entry point for packets arriving from the CPU port.
    RequestStatus makeRequest(PacketPtr pkt);
    bool empty() const;
    int outstandingCount() const { return m_outstanding_count; }
    bool
    isDeadlockEventScheduled() const
    {
        return deadlockCheckEvent.scheduled();
    }

    void
    descheduleDeadlockEvent()
    {
        deschedule(deadlockCheckEvent);
    }

    void print(std::ostream& out) const;
    void printStats(std::ostream& out) const;
    void checkCoherence(const Address& address);

    void markRemoved();
    void removeRequest(SequencerRequest* request);
    void evictionCallback(const Address& address);

  private:
    void issueRequest(PacketPtr pkt, RubyRequestType type);

    void hitCallback(SequencerRequest* request,
                     GenericMachineType mach,
                     DataBlock& data,
                     bool success,
                     Time initialRequestTime,
                     Time forwardRequestTime,
                     Time firstResponseTime);

    RequestStatus insertRequest(PacketPtr pkt, RubyRequestType request_type);

    bool handleLlsc(const Address& address, SequencerRequest* request);

    // Private copy constructor and assignment operator
    Sequencer(const Sequencer& obj);
    Sequencer& operator=(const Sequencer& obj);

  private:
    // Configuration values taken from the Python RubySequencer params.
    int m_max_outstanding_requests;
    int m_deadlock_threshold;

    // L1 caches this sequencer fronts (not owned here).
    CacheMemory* m_dataCache_ptr;
    CacheMemory* m_instCache_ptr;

    // Outstanding requests keyed by cache-line address: store-like
    // requests in the write table, LD/IFETCH in the read table.
    typedef m5::hash_map<Address, SequencerRequest*> RequestTable;
    RequestTable m_writeRequestTable;
    RequestTable m_readRequestTable;
    // Global outstanding request count, across all request tables
    int m_outstanding_count;
    bool m_deadlock_check_scheduled;

    // Aliasing-stall counters reported by printStats().
    int m_store_waiting_on_load_cycles;
    int m_store_waiting_on_store_cycles;
    int m_load_waiting_on_store_cycles;
    int m_load_waiting_on_load_cycles;

    // When true, LL/SC handling is skipped (Network_test protocol).
    bool m_usingNetworkTester;

    // Event that fires wakeup() to run the periodic deadlock check.
    class SequencerWakeupEvent : public Event
    {
      private:
        Sequencer *m_sequencer_ptr;

      public:
        SequencerWakeupEvent(Sequencer *_seq) : m_sequencer_ptr(_seq) {}
        void process() { m_sequencer_ptr->wakeup(); }
        const char *description() const { return "Sequencer deadlock check"; }
    };

    SequencerWakeupEvent deadlockCheckEvent;
};
|
||||
|
||||
// Stream insertion for Sequencer: delegates to Sequencer::print() and
// flushes so debug output appears immediately.
inline std::ostream&
operator<<(std::ostream& out, const Sequencer& obj)
{
    obj.print(out);
    out << std::flush;
    return out;
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_SEQUENCER_HH__
|
||||
65
simulators/gem5/src/mem/ruby/system/Sequencer.py
Normal file
65
simulators/gem5/src/mem/ruby/system/Sequencer.py
Normal file
@ -0,0 +1,65 @@
|
||||
# Copyright (c) 2009 Advanced Micro Devices, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met: redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer;
|
||||
# redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution;
|
||||
# neither the name of the copyright holders nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Authors: Steve Reinhardt
|
||||
# Brad Beckmann
|
||||
|
||||
from m5.params import *
|
||||
from m5.proxy import *
|
||||
from MemObject import MemObject
|
||||
|
||||
class RubyPort(MemObject):
    # Abstract base SimObject for all CPU-facing Ruby ports (sequencers,
    # DMA sequencers, proxies). Declares the slave/master ports and the
    # parameters shared by every port type.
    type = 'RubyPort'
    abstract = True
    # Ports connecting this Ruby port to CPU-side objects.
    slave = VectorSlavePort("CPU slave port")
    master = VectorMasterPort("CPU master port")
    # Numeric id of this port instance (e.g. which CPU it serves).
    version = Param.Int(0, "")
    pio_port = MasterPort("Ruby_pio_port")
    # Enable RubyTester / network-tester specific behavior in C++.
    using_ruby_tester = Param.Bool(False, "")
    using_network_tester = Param.Bool(False, "")
    access_phys_mem = Param.Bool(True,
        "should the rubyport atomically update phys_mem")
    ruby_system = Param.RubySystem("")
    system = Param.System(Parent.any, "system object")
    support_data_reqs = Param.Bool(True, "data cache requests supported")
    support_inst_reqs = Param.Bool(True, "inst cache requests supported")
|
||||
|
||||
|
||||
class RubyPortProxy(RubyPort):
    # Minimal concrete RubyPort with no parameters of its own; the C++
    # side provides the implementation.
    type = 'RubyPortProxy'
|
||||
|
||||
class RubySequencer(RubyPort):
    # SimObject parameters for the C++ Sequencer class (Sequencer.cc):
    # the L1 caches it fronts and its outstanding-request/deadlock tunables.
    type = 'RubySequencer'
    cxx_class = 'Sequencer'
    icache = Param.RubyCache("")
    dcache = Param.RubyCache("")
    max_outstanding_requests = Param.Int(16,
        "max requests (incl. prefetches) outstanding")
    deadlock_threshold = Param.Int(500000,
        "max outstanding cycles for a request before deadlock/livelock declared")
|
||||
|
||||
class DMASequencer(RubyPort):
|
||||
type = 'DMASequencer'
|
||||
429
simulators/gem5/src/mem/ruby/system/SparseMemory.cc
Normal file
429
simulators/gem5/src/mem/ruby/system/SparseMemory.cc
Normal file
@ -0,0 +1,429 @@
|
||||
/*
|
||||
* Copyright (c) 2009 Advanced Micro Devices, Inc.
|
||||
* Copyright (c) 2012 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <queue>
|
||||
|
||||
#include "debug/RubyCache.hh"
|
||||
#include "mem/ruby/system/SparseMemory.hh"
|
||||
#include "mem/ruby/system/System.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
// Build a multi-level sparse directory covering the whole memory range.
// The tag bits (memory size bits minus block-offset bits) are split as
// evenly as possible across 'number_of_levels' lookup tables.
SparseMemory::SparseMemory(int number_of_levels)
{
    // Fixed: stray double semicolon after this initializer.
    m_total_number_of_bits = RubySystem::getMemorySizeBits()
        - RubySystem::getBlockSizeBits();

    m_number_of_levels = number_of_levels;

    //
    // Create the array that describes the bits per level.  The first
    // 'extra' levels absorb the remainder, one extra bit each.
    //
    m_number_of_bits_per_level = new int[m_number_of_levels];
    int even_level_bits = m_total_number_of_bits / m_number_of_levels;
    int extra = m_total_number_of_bits % m_number_of_levels;
    for (int level = 0; level < m_number_of_levels; level++) {
        if (level < extra)
            m_number_of_bits_per_level[level] = even_level_bits + 1;
        else
            m_number_of_bits_per_level[level] = even_level_bits;
    }
    m_map_head = new SparseMapType;

    // Statistics counters, one add/remove counter per level.
    m_total_adds = 0;
    m_total_removes = 0;
    m_adds_per_level = new uint64_t[m_number_of_levels];
    m_removes_per_level = new uint64_t[m_number_of_levels];
    for (int level = 0; level < m_number_of_levels; level++) {
        m_adds_per_level[level] = 0;
        m_removes_per_level[level] = 0;
    }
}
|
||||
|
||||
// Tear down the table hierarchy bottom-up, then release the bookkeeping
// arrays owned by this object.
SparseMemory::~SparseMemory()
{
    recursivelyRemoveTables(m_map_head, 0);
    delete m_map_head;

    delete [] m_number_of_bits_per_level;
    delete [] m_adds_per_level;
    delete [] m_removes_per_level;
}
|
||||
|
||||
// Recursively search table hierarchy for the lowest level table.
|
||||
// Delete the lowest table first, the tables above
|
||||
void
|
||||
SparseMemory::recursivelyRemoveTables(SparseMapType* curTable, int curLevel)
|
||||
{
|
||||
SparseMapType::iterator iter;
|
||||
|
||||
for (iter = curTable->begin(); iter != curTable->end(); iter++) {
|
||||
SparseMemEntry entry = (*iter).second;
|
||||
|
||||
if (curLevel != (m_number_of_levels - 1)) {
|
||||
// If the not at the last level, analyze those lower level
|
||||
// tables first, then delete those next tables
|
||||
SparseMapType* nextTable = (SparseMapType*)(entry);
|
||||
recursivelyRemoveTables(nextTable, (curLevel + 1));
|
||||
delete nextTable;
|
||||
} else {
|
||||
// If at the last level, delete the directory entry
|
||||
delete (AbstractEntry*)(entry);
|
||||
}
|
||||
entry = NULL;
|
||||
}
|
||||
|
||||
// Once all entries have been deleted, erase the entries
|
||||
curTable->erase(curTable->begin(), curTable->end());
|
||||
}
|
||||
|
||||
// tests to see if an address is present in the memory
|
||||
bool
|
||||
SparseMemory::exist(const Address& address) const
|
||||
{
|
||||
SparseMapType* curTable = m_map_head;
|
||||
Address curAddress;
|
||||
|
||||
// Initiallize the high bit to be the total number of bits plus
|
||||
// the block offset. However the highest bit index is one less
|
||||
// than this value.
|
||||
int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
|
||||
int lowBit;
|
||||
assert(address == line_address(address));
|
||||
DPRINTF(RubyCache, "address: %s\n", address);
|
||||
|
||||
for (int level = 0; level < m_number_of_levels; level++) {
|
||||
// Create the appropriate sub address for this level
|
||||
// Note: that set Address is inclusive of the specified range,
|
||||
// thus the high bit is one less than the total number of bits
|
||||
// used to create the address.
|
||||
lowBit = highBit - m_number_of_bits_per_level[level];
|
||||
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
|
||||
|
||||
DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, "
|
||||
"curAddress: %s\n",
|
||||
level, lowBit, highBit - 1, curAddress);
|
||||
|
||||
// Adjust the highBit value for the next level
|
||||
highBit -= m_number_of_bits_per_level[level];
|
||||
|
||||
// If the address is found, move on to the next level.
|
||||
// Otherwise, return not found
|
||||
if (curTable->count(curAddress) != 0) {
|
||||
curTable = (SparseMapType*)((*curTable)[curAddress]);
|
||||
} else {
|
||||
DPRINTF(RubyCache, "Not found\n");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
DPRINTF(RubyCache, "Entry found\n");
|
||||
return true;
|
||||
}
|
||||
|
||||
// add an address to memory
|
||||
void
|
||||
SparseMemory::add(const Address& address, AbstractEntry* entry)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
assert(!exist(address));
|
||||
|
||||
m_total_adds++;
|
||||
|
||||
Address curAddress;
|
||||
SparseMapType* curTable = m_map_head;
|
||||
|
||||
// Initiallize the high bit to be the total number of bits plus
|
||||
// the block offset. However the highest bit index is one less
|
||||
// than this value.
|
||||
int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
|
||||
int lowBit;
|
||||
void* newEntry = NULL;
|
||||
|
||||
for (int level = 0; level < m_number_of_levels; level++) {
|
||||
// create the appropriate address for this level
|
||||
// Note: that set Address is inclusive of the specified range,
|
||||
// thus the high bit is one less than the total number of bits
|
||||
// used to create the address.
|
||||
lowBit = highBit - m_number_of_bits_per_level[level];
|
||||
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
|
||||
|
||||
// Adjust the highBit value for the next level
|
||||
highBit -= m_number_of_bits_per_level[level];
|
||||
|
||||
// if the address exists in the cur table, move on. Otherwise
|
||||
// create a new table.
|
||||
if (curTable->count(curAddress) != 0) {
|
||||
curTable = (SparseMapType*)((*curTable)[curAddress]);
|
||||
} else {
|
||||
m_adds_per_level[level]++;
|
||||
|
||||
// if the last level, add a directory entry. Otherwise add a map.
|
||||
if (level == (m_number_of_levels - 1)) {
|
||||
entry->getDataBlk().clear();
|
||||
newEntry = (void*)entry;
|
||||
} else {
|
||||
SparseMapType* tempMap = new SparseMapType;
|
||||
newEntry = (void*)(tempMap);
|
||||
}
|
||||
|
||||
// Create the pointer container SparseMemEntry and add it
|
||||
// to the table.
|
||||
(*curTable)[curAddress] = newEntry;
|
||||
|
||||
// Move to the next level of the heirarchy
|
||||
curTable = (SparseMapType*)newEntry;
|
||||
}
|
||||
}
|
||||
|
||||
assert(exist(address));
|
||||
return;
|
||||
}
|
||||
|
||||
// recursively search table hierarchy for the lowest level table.
|
||||
// remove the lowest entry and any empty tables above it.
|
||||
int
|
||||
SparseMemory::recursivelyRemoveLevels(const Address& address,
|
||||
CurNextInfo& curInfo)
|
||||
{
|
||||
Address curAddress;
|
||||
CurNextInfo nextInfo;
|
||||
SparseMemEntry entry;
|
||||
|
||||
// create the appropriate address for this level
|
||||
// Note: that set Address is inclusive of the specified range,
|
||||
// thus the high bit is one less than the total number of bits
|
||||
// used to create the address.
|
||||
curAddress.setAddress(address.bitSelect(curInfo.lowBit,
|
||||
curInfo.highBit - 1));
|
||||
|
||||
DPRINTF(RubyCache, "address: %s, curInfo.level: %d, curInfo.lowBit: %d, "
|
||||
"curInfo.highBit - 1: %d, curAddress: %s\n",
|
||||
address, curInfo.level, curInfo.lowBit,
|
||||
curInfo.highBit - 1, curAddress);
|
||||
|
||||
assert(curInfo.curTable->count(curAddress) != 0);
|
||||
|
||||
entry = (*(curInfo.curTable))[curAddress];
|
||||
|
||||
if (curInfo.level < (m_number_of_levels - 1)) {
|
||||
// set up next level's info
|
||||
nextInfo.curTable = (SparseMapType*)(entry);
|
||||
nextInfo.level = curInfo.level + 1;
|
||||
|
||||
nextInfo.highBit = curInfo.highBit -
|
||||
m_number_of_bits_per_level[curInfo.level];
|
||||
|
||||
nextInfo.lowBit = curInfo.lowBit -
|
||||
m_number_of_bits_per_level[curInfo.level + 1];
|
||||
|
||||
// recursively search the table hierarchy
|
||||
int tableSize = recursivelyRemoveLevels(address, nextInfo);
|
||||
|
||||
// If this table below is now empty, we must delete it and
|
||||
// erase it from our table.
|
||||
if (tableSize == 0) {
|
||||
m_removes_per_level[curInfo.level]++;
|
||||
delete nextInfo.curTable;
|
||||
entry = NULL;
|
||||
curInfo.curTable->erase(curAddress);
|
||||
}
|
||||
} else {
|
||||
// if this is the last level, we have reached the Directory
|
||||
// Entry and thus we should delete it including the
|
||||
// SparseMemEntry container struct.
|
||||
delete (AbstractEntry*)(entry);
|
||||
entry = NULL;
|
||||
curInfo.curTable->erase(curAddress);
|
||||
m_removes_per_level[curInfo.level]++;
|
||||
}
|
||||
return curInfo.curTable->size();
|
||||
}
|
||||
|
||||
// remove an entry from the table
|
||||
void
|
||||
SparseMemory::remove(const Address& address)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
assert(exist(address));
|
||||
|
||||
m_total_removes++;
|
||||
|
||||
CurNextInfo nextInfo;
|
||||
|
||||
// Initialize table pointer and level value
|
||||
nextInfo.curTable = m_map_head;
|
||||
nextInfo.level = 0;
|
||||
|
||||
// Initiallize the high bit to be the total number of bits plus
|
||||
// the block offset. However the highest bit index is one less
|
||||
// than this value.
|
||||
nextInfo.highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
|
||||
nextInfo.lowBit = nextInfo.highBit - m_number_of_bits_per_level[0];;
|
||||
|
||||
// recursively search the table hierarchy for empty tables
|
||||
// starting from the level 0. Note we do not check the return
|
||||
// value because the head table is never deleted;
|
||||
recursivelyRemoveLevels(address, nextInfo);
|
||||
|
||||
assert(!exist(address));
|
||||
return;
|
||||
}
|
||||
|
||||
// looks an address up in memory
|
||||
// Look an address up in memory; returns the directory entry, or NULL if
// the address is not mapped.
AbstractEntry*
SparseMemory::lookup(const Address& address)
{
    assert(address == line_address(address));

    SparseMapType* table = m_map_head;
    Address subAddr;

    // Start the bit window just above the block-offset bits; the highest
    // usable bit index is one less than this value.
    int hi = m_total_number_of_bits + RubySystem::getBlockSizeBits();

    for (int level = 0; level < m_number_of_levels; level++) {
        // Carve out this level's slice of the address (bitSelect is
        // inclusive of both ends, hence the hi - 1).
        int lo = hi - m_number_of_bits_per_level[level];
        subAddr.setAddress(address.bitSelect(lo, hi - 1));

        DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, "
                "curAddress: %s\n",
                level, lo, hi - 1, subAddr);

        // Slide the window down for the next level.
        hi -= m_number_of_bits_per_level[level];

        // Missing at any level means the address is not mapped.
        if (table->count(subAddr) == 0) {
            DPRINTF(RubyCache, "Not found\n");
            return NULL;
        }
        table = (SparseMapType*)((*table)[subAddr]);
    }

    // After the last level, 'table' actually points at the directory
    // entry, not another table.
    return (AbstractEntry*)table;
}
|
||||
|
||||
void
|
||||
SparseMemory::recordBlocks(int cntrl_id, CacheRecorder* tr) const
|
||||
{
|
||||
queue<SparseMapType*> unexplored_nodes[2];
|
||||
queue<physical_address_t> address_of_nodes[2];
|
||||
|
||||
unexplored_nodes[0].push(m_map_head);
|
||||
address_of_nodes[0].push(0);
|
||||
|
||||
int parity_of_level = 0;
|
||||
physical_address_t address, temp_address;
|
||||
Address curAddress;
|
||||
|
||||
// Initiallize the high bit to be the total number of bits plus
|
||||
// the block offset. However the highest bit index is one less
|
||||
// than this value.
|
||||
int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
|
||||
int lowBit;
|
||||
|
||||
for (int cur_level = 0; cur_level < m_number_of_levels; cur_level++) {
|
||||
|
||||
// create the appropriate address for this level
|
||||
// Note: that set Address is inclusive of the specified range,
|
||||
// thus the high bit is one less than the total number of bits
|
||||
// used to create the address.
|
||||
lowBit = highBit - m_number_of_bits_per_level[cur_level];
|
||||
|
||||
while (!unexplored_nodes[parity_of_level].empty()) {
|
||||
|
||||
SparseMapType* node = unexplored_nodes[parity_of_level].front();
|
||||
unexplored_nodes[parity_of_level].pop();
|
||||
|
||||
address = address_of_nodes[parity_of_level].front();
|
||||
address_of_nodes[parity_of_level].pop();
|
||||
|
||||
SparseMapType::iterator iter;
|
||||
|
||||
for (iter = node->begin(); iter != node->end(); iter++) {
|
||||
SparseMemEntry entry = (*iter).second;
|
||||
curAddress = (*iter).first;
|
||||
|
||||
if (cur_level != (m_number_of_levels - 1)) {
|
||||
// If not at the last level, put this node in the queue
|
||||
unexplored_nodes[1 - parity_of_level].push(
|
||||
(SparseMapType*)(entry));
|
||||
address_of_nodes[1 - parity_of_level].push(address |
|
||||
(curAddress.getAddress() << lowBit));
|
||||
} else {
|
||||
// If at the last level, add a trace record
|
||||
temp_address = address | (curAddress.getAddress()
|
||||
<< lowBit);
|
||||
DataBlock block = ((AbstractEntry*)entry)->getDataBlk();
|
||||
tr->addRecord(cntrl_id, temp_address, 0, RubyRequestType_ST, 0,
|
||||
block);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Adjust the highBit value for the next level
|
||||
highBit -= m_number_of_bits_per_level[cur_level];
|
||||
parity_of_level = 1 - parity_of_level;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
SparseMemory::print(ostream& out) const
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
SparseMemory::printStats(ostream& out) const
|
||||
{
|
||||
out << "total_adds: " << m_total_adds << " [";
|
||||
for (int level = 0; level < m_number_of_levels; level++) {
|
||||
out << m_adds_per_level[level] << " ";
|
||||
}
|
||||
out << "]" << endl;
|
||||
out << "total_removes: " << m_total_removes << " [";
|
||||
for (int level = 0; level < m_number_of_levels; level++) {
|
||||
out << m_removes_per_level[level] << " ";
|
||||
}
|
||||
out << "]" << endl;
|
||||
}
|
||||
103
simulators/gem5/src/mem/ruby/system/SparseMemory.hh
Normal file
103
simulators/gem5/src/mem/ruby/system/SparseMemory.hh
Normal file
@ -0,0 +1,103 @@
|
||||
/*
|
||||
* Copyright (c) 2009 Advanced Micro Devices, Inc.
|
||||
* Copyright (c) 2012 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__
|
||||
#define __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#include "base/hashmap.hh"
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
#include "mem/ruby/recorder/CacheRecorder.hh"
|
||||
#include "mem/ruby/slicc_interface/AbstractEntry.hh"
|
||||
|
||||
typedef void* SparseMemEntry;
|
||||
typedef m5::hash_map<Address, SparseMemEntry> SparseMapType;
|
||||
|
||||
struct CurNextInfo
|
||||
{
|
||||
SparseMapType* curTable;
|
||||
int level;
|
||||
int highBit;
|
||||
int lowBit;
|
||||
};
|
||||
|
||||
class SparseMemory
|
||||
{
|
||||
public:
|
||||
SparseMemory(int number_of_levels);
|
||||
~SparseMemory();
|
||||
|
||||
void printConfig(std::ostream& out) { }
|
||||
|
||||
bool exist(const Address& address) const;
|
||||
void add(const Address& address, AbstractEntry*);
|
||||
void remove(const Address& address);
|
||||
|
||||
/*!
|
||||
* Function for recording the contents of memory. This function walks
|
||||
* through all the levels of the sparse memory in a breadth first
|
||||
* fashion. This might need more memory than a depth first approach.
|
||||
* But breadth first seems easier to me than a depth first approach.
|
||||
*/
|
||||
void recordBlocks(int cntrl_id, CacheRecorder *) const;
|
||||
|
||||
AbstractEntry* lookup(const Address& address);
|
||||
|
||||
// Print cache contents
|
||||
void print(std::ostream& out) const;
|
||||
void printStats(std::ostream& out) const;
|
||||
|
||||
private:
|
||||
// Private Methods
|
||||
|
||||
// Private copy constructor and assignment operator
|
||||
SparseMemory(const SparseMemory& obj);
|
||||
SparseMemory& operator=(const SparseMemory& obj);
|
||||
|
||||
// Used by destructor to recursively remove all tables
|
||||
void recursivelyRemoveTables(SparseMapType* currentTable, int level);
|
||||
|
||||
// recursive search for address and remove associated entries
|
||||
int recursivelyRemoveLevels(const Address& address, CurNextInfo& curInfo);
|
||||
|
||||
// Data Members (m_prefix)
|
||||
SparseMapType* m_map_head;
|
||||
|
||||
int m_total_number_of_bits;
|
||||
int m_number_of_levels;
|
||||
int* m_number_of_bits_per_level;
|
||||
|
||||
uint64_t m_total_adds;
|
||||
uint64_t m_total_removes;
|
||||
uint64_t* m_adds_per_level;
|
||||
uint64_t* m_removes_per_level;
|
||||
};
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__
|
||||
486
simulators/gem5/src/mem/ruby/system/System.cc
Normal file
486
simulators/gem5/src/mem/ruby/system/System.cc
Normal file
@ -0,0 +1,486 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2011 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <zlib.h>
|
||||
|
||||
#include <cstdio>
|
||||
|
||||
#include "base/intmath.hh"
|
||||
#include "base/output.hh"
|
||||
#include "debug/RubyCacheTrace.hh"
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
#include "mem/ruby/network/Network.hh"
|
||||
#include "mem/ruby/profiler/Profiler.hh"
|
||||
#include "mem/ruby/system/System.hh"
|
||||
#include "sim/eventq.hh"
|
||||
#include "sim/simulate.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
int RubySystem::m_random_seed;
|
||||
bool RubySystem::m_randomization;
|
||||
Tick RubySystem::m_clock;
|
||||
int RubySystem::m_block_size_bytes;
|
||||
int RubySystem::m_block_size_bits;
|
||||
uint64 RubySystem::m_memory_size_bytes;
|
||||
int RubySystem::m_memory_size_bits;
|
||||
|
||||
Network* RubySystem::m_network_ptr;
|
||||
Profiler* RubySystem::m_profiler_ptr;
|
||||
MemoryVector* RubySystem::m_mem_vec_ptr;
|
||||
|
||||
// Construct the (singleton) Ruby system: capture configuration into the
// static members, set up the event queue, optionally allocate the backing
// memory vector, and register the exit-time stats callback.
RubySystem::RubySystem(const Params *p)
    : SimObject(p)
{
    // Enforce the singleton: only one RubySystem may exist.
    if (g_system_ptr != NULL)
        fatal("Only one RubySystem object currently allowed.\n");

    m_random_seed = p->random_seed;
    srandom(m_random_seed);
    m_randomization = p->randomization;
    m_clock = p->clock;

    m_block_size_bytes = p->block_size_bytes;
    assert(isPowerOf2(m_block_size_bytes));
    m_block_size_bits = floorLog2(m_block_size_bytes);

    m_memory_size_bytes = p->mem_size;
    m_memory_size_bits = (m_memory_size_bytes == 0)
        ? 0
        : floorLog2(m_memory_size_bytes);

    g_eventQueue_ptr = new RubyEventQueue(p->eventq, m_clock);
    g_system_ptr = this;

    // With no_mem_vec, memory contents are not backed by a vector (e.g.
    // sparse directory configurations).
    if (p->no_mem_vec) {
        m_mem_vec_ptr = NULL;
    } else {
        m_mem_vec_ptr = new MemoryVector;
        m_mem_vec_ptr->resize(m_memory_size_bytes);
    }

    //
    // Print ruby configuration and stats at exit
    //
    RubyExitCallback* exit_cb = new RubyExitCallback(p->stats_filename);
    registerExitCallback(exit_cb);

    m_warmup_enabled = false;
    m_cooldown_enabled = false;
}
|
||||
|
||||
void
|
||||
RubySystem::init()
|
||||
{
|
||||
m_profiler_ptr->clearStats();
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::registerNetwork(Network* network_ptr)
|
||||
{
|
||||
m_network_ptr = network_ptr;
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::registerProfiler(Profiler* profiler_ptr)
|
||||
{
|
||||
m_profiler_ptr = profiler_ptr;
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::registerAbstractController(AbstractController* cntrl)
|
||||
{
|
||||
m_abs_cntrl_vec.push_back(cntrl);
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::registerSparseMemory(SparseMemory* s)
|
||||
{
|
||||
m_sparse_memory_vector.push_back(s);
|
||||
}
|
||||
|
||||
// Release the network, profiler, and (if allocated) the backing memory
// vector owned by the system.
RubySystem::~RubySystem()
{
    delete m_network_ptr;
    delete m_profiler_ptr;
    if (m_mem_vec_ptr)
        delete m_mem_vec_ptr;
}
|
||||
|
||||
void
|
||||
RubySystem::printSystemConfig(ostream & out)
|
||||
{
|
||||
out << "RubySystem config:" << endl
|
||||
<< " random_seed: " << m_random_seed << endl
|
||||
<< " randomization: " << m_randomization << endl
|
||||
<< " cycle_period: " << m_clock << endl
|
||||
<< " block_size_bytes: " << m_block_size_bytes << endl
|
||||
<< " block_size_bits: " << m_block_size_bits << endl
|
||||
<< " memory_size_bytes: " << m_memory_size_bytes << endl
|
||||
<< " memory_size_bits: " << m_memory_size_bits << endl;
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::printConfig(ostream& out)
|
||||
{
|
||||
out << "\n================ Begin RubySystem Configuration Print ================\n\n";
|
||||
printSystemConfig(out);
|
||||
m_network_ptr->printConfig(out);
|
||||
m_profiler_ptr->printConfig(out);
|
||||
out << "\n================ End RubySystem Configuration Print ================\n\n";
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::printStats(ostream& out)
|
||||
{
|
||||
const time_t T = time(NULL);
|
||||
tm *localTime = localtime(&T);
|
||||
char buf[100];
|
||||
strftime(buf, 100, "%b/%d/%Y %H:%M:%S", localTime);
|
||||
|
||||
out << "Real time: " << buf << endl;
|
||||
|
||||
m_profiler_ptr->printStats(out);
|
||||
m_network_ptr->printStats(out);
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::writeCompressedTrace(uint8* raw_data, string filename,
|
||||
uint64 uncompressed_trace_size)
|
||||
{
|
||||
// Create the checkpoint file for the memory
|
||||
string thefile = Checkpoint::dir() + "/" + filename.c_str();
|
||||
|
||||
int fd = creat(thefile.c_str(), 0664);
|
||||
if (fd < 0) {
|
||||
perror("creat");
|
||||
fatal("Can't open memory trace file '%s'\n", filename);
|
||||
}
|
||||
|
||||
gzFile compressedMemory = gzdopen(fd, "wb");
|
||||
if (compressedMemory == NULL)
|
||||
fatal("Insufficient memory to allocate compression state for %s\n",
|
||||
filename);
|
||||
|
||||
if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
|
||||
uncompressed_trace_size) {
|
||||
fatal("Write failed on memory trace file '%s'\n", filename);
|
||||
}
|
||||
|
||||
if (gzclose(compressedMemory)) {
|
||||
fatal("Close failed on memory trace file '%s'\n", filename);
|
||||
}
|
||||
delete raw_data;
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::serialize(std::ostream &os)
|
||||
{
|
||||
m_cooldown_enabled = true;
|
||||
|
||||
vector<Sequencer*> sequencer_map;
|
||||
Sequencer* sequencer_ptr = NULL;
|
||||
int cntrl_id = -1;
|
||||
|
||||
|
||||
for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
|
||||
sequencer_map.push_back(m_abs_cntrl_vec[cntrl]->getSequencer());
|
||||
if (sequencer_ptr == NULL) {
|
||||
sequencer_ptr = sequencer_map[cntrl];
|
||||
cntrl_id = cntrl;
|
||||
}
|
||||
}
|
||||
|
||||
assert(sequencer_ptr != NULL);
|
||||
|
||||
for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
|
||||
if (sequencer_map[cntrl] == NULL) {
|
||||
sequencer_map[cntrl] = sequencer_ptr;
|
||||
}
|
||||
}
|
||||
|
||||
DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
|
||||
// Create the CacheRecorder and record the cache trace
|
||||
m_cache_recorder = new CacheRecorder(NULL, 0, sequencer_map);
|
||||
|
||||
for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
|
||||
m_abs_cntrl_vec[cntrl]->recordCacheTrace(cntrl, m_cache_recorder);
|
||||
}
|
||||
|
||||
DPRINTF(RubyCacheTrace, "Cache Trace Complete\n");
|
||||
// save the current tick value
|
||||
Tick curtick_original = curTick();
|
||||
// save the event queue head
|
||||
Event* eventq_head = eventq->replaceHead(NULL);
|
||||
DPRINTF(RubyCacheTrace, "Recording current tick %ld and event queue\n",
|
||||
curtick_original);
|
||||
|
||||
// Schedule an event to start cache cooldown
|
||||
DPRINTF(RubyCacheTrace, "Starting cache flush\n");
|
||||
enqueueRubyEvent(curTick());
|
||||
simulate();
|
||||
DPRINTF(RubyCacheTrace, "Cache flush complete\n");
|
||||
|
||||
// Restore eventq head
|
||||
eventq_head = eventq->replaceHead(eventq_head);
|
||||
// Restore curTick
|
||||
curTick(curtick_original);
|
||||
|
||||
uint8* raw_data = NULL;
|
||||
|
||||
if (m_mem_vec_ptr != NULL) {
|
||||
uint64 memory_trace_size = m_mem_vec_ptr->collatePages(raw_data);
|
||||
|
||||
string memory_trace_file = name() + ".memory.gz";
|
||||
writeCompressedTrace(raw_data, memory_trace_file,
|
||||
memory_trace_size);
|
||||
|
||||
SERIALIZE_SCALAR(memory_trace_file);
|
||||
SERIALIZE_SCALAR(memory_trace_size);
|
||||
|
||||
} else {
|
||||
for (int i = 0; i < m_sparse_memory_vector.size(); ++i) {
|
||||
m_sparse_memory_vector[i]->recordBlocks(cntrl_id,
|
||||
m_cache_recorder);
|
||||
}
|
||||
}
|
||||
|
||||
// Aggergate the trace entries together into a single array
|
||||
raw_data = new uint8_t[4096];
|
||||
uint64 cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
|
||||
4096);
|
||||
string cache_trace_file = name() + ".cache.gz";
|
||||
writeCompressedTrace(raw_data, cache_trace_file, cache_trace_size);
|
||||
|
||||
SERIALIZE_SCALAR(cache_trace_file);
|
||||
SERIALIZE_SCALAR(cache_trace_size);
|
||||
|
||||
m_cooldown_enabled = false;
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::readCompressedTrace(string filename, uint8*& raw_data,
|
||||
uint64& uncompressed_trace_size)
|
||||
{
|
||||
// Read the trace file
|
||||
gzFile compressedTrace;
|
||||
|
||||
// trace file
|
||||
int fd = open(filename.c_str(), O_RDONLY);
|
||||
if (fd < 0) {
|
||||
perror("open");
|
||||
fatal("Unable to open trace file %s", filename);
|
||||
}
|
||||
|
||||
compressedTrace = gzdopen(fd, "rb");
|
||||
if (compressedTrace == NULL) {
|
||||
fatal("Insufficient memory to allocate compression state for %s\n",
|
||||
filename);
|
||||
}
|
||||
|
||||
raw_data = new uint8_t[uncompressed_trace_size];
|
||||
if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
|
||||
uncompressed_trace_size) {
|
||||
fatal("Unable to read complete trace from file %s\n", filename);
|
||||
}
|
||||
|
||||
if (gzclose(compressedTrace)) {
|
||||
fatal("Failed to close cache trace file '%s'\n", filename);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
RubySystem::unserialize(Checkpoint *cp, const string §ion)
|
||||
{
|
||||
//
|
||||
// The main purpose for clearing stats in the unserialize process is so
|
||||
// that the profiler can correctly set its start time to the unserialized
|
||||
// value of curTick()
|
||||
//
|
||||
clearStats();
|
||||
uint8* uncompressed_trace = NULL;
|
||||
|
||||
if (m_mem_vec_ptr != NULL) {
|
||||
string memory_trace_file;
|
||||
uint64 memory_trace_size = 0;
|
||||
|
||||
UNSERIALIZE_SCALAR(memory_trace_file);
|
||||
UNSERIALIZE_SCALAR(memory_trace_size);
|
||||
memory_trace_file = cp->cptDir + "/" + memory_trace_file;
|
||||
|
||||
readCompressedTrace(memory_trace_file, uncompressed_trace,
|
||||
memory_trace_size);
|
||||
m_mem_vec_ptr->populatePages(uncompressed_trace);
|
||||
|
||||
delete uncompressed_trace;
|
||||
uncompressed_trace = NULL;
|
||||
}
|
||||
|
||||
string cache_trace_file;
|
||||
uint64 cache_trace_size = 0;
|
||||
|
||||
UNSERIALIZE_SCALAR(cache_trace_file);
|
||||
UNSERIALIZE_SCALAR(cache_trace_size);
|
||||
cache_trace_file = cp->cptDir + "/" + cache_trace_file;
|
||||
|
||||
readCompressedTrace(cache_trace_file, uncompressed_trace,
|
||||
cache_trace_size);
|
||||
m_warmup_enabled = true;
|
||||
|
||||
vector<Sequencer*> sequencer_map;
|
||||
Sequencer* t = NULL;
|
||||
for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
|
||||
sequencer_map.push_back(m_abs_cntrl_vec[cntrl]->getSequencer());
|
||||
if(t == NULL) t = sequencer_map[cntrl];
|
||||
}
|
||||
|
||||
assert(t != NULL);
|
||||
|
||||
for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
|
||||
if (sequencer_map[cntrl] == NULL) {
|
||||
sequencer_map[cntrl] = t;
|
||||
}
|
||||
}
|
||||
|
||||
m_cache_recorder = new CacheRecorder(uncompressed_trace, cache_trace_size,
|
||||
sequencer_map);
|
||||
}
|
||||
|
||||
// Replay the checkpointed cache trace (if one was unserialized) to warm
// the caches before real simulation begins.  The replay runs on the
// event queue at tick 0 with all regular events temporarily removed, so
// warmup consumes no simulated time; the original queue head and tick
// are restored afterwards.  The order of the save/restore steps below
// is significant.
void
RubySystem::startup()
{
    if (m_warmup_enabled) {
        // save the current tick value
        Tick curtick_original = curTick();
        // save the event queue head (detach all pending events)
        Event* eventq_head = eventq->replaceHead(NULL);
        // set curTick to 0 so warmup events run from time zero
        curTick(0);

        // Schedule an event to start cache warmup; RubyEvent::process
        // fetches one trace record at a time until the trace is drained
        enqueueRubyEvent(curTick());
        simulate();

        // Warmup done: the recorder (and the trace buffer it owns) are
        // no longer needed
        delete m_cache_recorder;
        m_cache_recorder = NULL;
        m_warmup_enabled = false;
        // Restore eventq head
        eventq_head = eventq->replaceHead(eventq_head);
        // Restore curTick
        curTick(curtick_original);
    }
}
|
||||
|
||||
void
|
||||
RubySystem::RubyEvent::process()
|
||||
{
|
||||
if (ruby_system->m_warmup_enabled) {
|
||||
ruby_system->m_cache_recorder->enqueueNextFetchRequest();
|
||||
} else if (ruby_system->m_cooldown_enabled) {
|
||||
ruby_system->m_cache_recorder->enqueueNextFlushRequest();
|
||||
}
|
||||
}
|
||||
|
||||
// Reset statistics gathered so far; delegates to the profiler and the
// network, the two components that accumulate stats.
void
RubySystem::clearStats() const
{
    m_profiler_ptr->clearStats();
    m_network_ptr->clearStats();
}
|
||||
|
||||
#ifdef CHECK_COHERENCE
|
||||
// This code will check for cases if the given cache block is exclusive in
|
||||
// one node and shared in another-- a coherence violation
|
||||
//
|
||||
// To use, the SLICC specification must call sequencer.checkCoherence(address)
|
||||
// when the controller changes to a state with new permissions. Do this
|
||||
// in setState. The SLICC spec must also define methods "isBlockShared"
|
||||
// and "isBlockExclusive" that are specific to that protocol
|
||||
//
|
||||
void
RubySystem::checkGlobalCoherenceInvariant(const Address& addr)
{
    // The entire check is compiled out (#if 0); it references
    // m_chip_vector, which no longer exists in this class, and is kept
    // only as documentation of the intended invariant: no block may be
    // exclusive in one node while shared or exclusive in another.
#if 0
    NodeID exclusive = -1;
    bool sharedDetected = false;
    NodeID lastShared = -1;

    for (int i = 0; i < m_chip_vector.size(); i++) {
        if (m_chip_vector[i]->isBlockExclusive(addr)) {
            if (exclusive != -1) {
                // coherence violation
                WARN_EXPR(exclusive);
                WARN_EXPR(m_chip_vector[i]->getID());
                WARN_EXPR(addr);
                WARN_EXPR(g_eventQueue_ptr->getTime());
                ERROR_MSG("Coherence Violation Detected -- 2 exclusive chips");
            } else if (sharedDetected) {
                WARN_EXPR(lastShared);
                WARN_EXPR(m_chip_vector[i]->getID());
                WARN_EXPR(addr);
                WARN_EXPR(g_eventQueue_ptr->getTime());
                ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
            } else {
                exclusive = m_chip_vector[i]->getID();
            }
        } else if (m_chip_vector[i]->isBlockShared(addr)) {
            sharedDetected = true;
            lastShared = m_chip_vector[i]->getID();

            if (exclusive != -1) {
                WARN_EXPR(lastShared);
                WARN_EXPR(exclusive);
                WARN_EXPR(addr);
                WARN_EXPR(g_eventQueue_ptr->getTime());
                ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
            }
        }
    }
#endif
}
|
||||
#endif
|
||||
|
||||
// Standard gem5 param-object factory: build the RubySystem SimObject
// from its Python-generated parameter struct.
RubySystem *
RubySystemParams::create()
{
    RubySystem *system = new RubySystem(this);
    return system;
}
|
||||
|
||||
/**
|
||||
* virtual process function that is invoked when the callback
|
||||
* queue is executed.
|
||||
*/
|
||||
void
|
||||
RubyExitCallback::process()
|
||||
{
|
||||
std::ostream *os = simout.create(stats_filename);
|
||||
RubySystem::printConfig(*os);
|
||||
*os << endl;
|
||||
RubySystem::printStats(*os);
|
||||
}
|
||||
198
simulators/gem5/src/mem/ruby/system/System.hh
Normal file
198
simulators/gem5/src/mem/ruby/system/System.hh
Normal file
@ -0,0 +1,198 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Contains all of the various parts of the system we are simulating.
|
||||
* Performs allocation, deallocation, and setup of all the major
|
||||
* components of the system
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_SYSTEM_HH__
|
||||
#define __MEM_RUBY_SYSTEM_SYSTEM_HH__
|
||||
|
||||
#include "base/callback.hh"
|
||||
#include "mem/ruby/common/Global.hh"
|
||||
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
|
||||
#include "mem/ruby/recorder/CacheRecorder.hh"
|
||||
#include "mem/ruby/slicc_interface/AbstractController.hh"
|
||||
#include "mem/ruby/system/MemoryVector.hh"
|
||||
#include "mem/ruby/system/SparseMemory.hh"
|
||||
#include "params/RubySystem.hh"
|
||||
#include "sim/sim_object.hh"
|
||||
|
||||
class Network;
|
||||
class Profiler;
|
||||
|
||||
// Top-level container for the Ruby memory system: owns the network,
// profiler, backing memory image, and all cache/directory controllers,
// and implements checkpointing (serialize/unserialize) plus cache
// warmup/cooldown via a CacheRecorder.
class RubySystem : public SimObject
{
  public:
    // Event used to single-step cache warmup (after unserialize) and
    // cooldown (flush before serialize); see RubyEvent::process().
    class RubyEvent : public Event
    {
      public:
        RubyEvent(RubySystem* _ruby_system)
        {
            ruby_system = _ruby_system;
        }
      private:
        void process();

        RubySystem* ruby_system;
    };

    friend class RubyEvent;

    typedef RubySystemParams Params;
    RubySystem(const Params *p);
    ~RubySystem();

    // config accessors (global, shared by all Ruby components)
    static int getRandomSeed() { return m_random_seed; }
    static int getRandomization() { return m_randomization; }
    static int getBlockSizeBytes() { return m_block_size_bytes; }
    static int getBlockSizeBits() { return m_block_size_bits; }
    static uint64 getMemorySizeBytes() { return m_memory_size_bytes; }
    static int getMemorySizeBits() { return m_memory_size_bits; }

    // Public Methods
    static Network*
    getNetwork()
    {
        assert(m_network_ptr != NULL);
        return m_network_ptr;
    }

    static RubyEventQueue*
    getEventQueue()
    {
        return g_eventQueue_ptr;
    }

    Profiler*
    getProfiler()
    {
        assert(m_profiler_ptr != NULL);
        return m_profiler_ptr;
    }

    static MemoryVector*
    getMemoryVector()
    {
        assert(m_mem_vec_ptr != NULL);
        return m_mem_vec_ptr;
    }

    static void printConfig(std::ostream& out);
    static void printStats(std::ostream& out);
    void clearStats() const;

    // Instruction counts are not modeled here; always reports 1.
    uint64 getInstructionCount(int thread) { return 1; }
    static uint64
    getCycleCount(int thread)
    {
        return g_eventQueue_ptr->getTime();
    }

    void print(std::ostream& out) const;

    // Checkpointing: serialize flushes caches (cooldown) and writes
    // compressed memory/cache traces; unserialize reads them back.
    void serialize(std::ostream &os);
    void unserialize(Checkpoint *cp, const std::string &section);
    void process();
    void startup();

    // Registration hooks called by components during construction.
    void registerNetwork(Network*);
    void registerProfiler(Profiler*);
    void registerAbstractController(AbstractController*);
    void registerSparseMemory(SparseMemory*);

    bool eventQueueEmpty() { return eventq->empty(); }
    void enqueueRubyEvent(Tick tick)
    {
        RubyEvent* e = new RubyEvent(this);
        schedule(e, tick);
    }

  private:
    // Private copy constructor and assignment operator
    RubySystem(const RubySystem& obj);
    RubySystem& operator=(const RubySystem& obj);

    void init();

    static void printSystemConfig(std::ostream& out);
    void readCompressedTrace(std::string filename,
                             uint8*& raw_data,
                             uint64& uncompressed_trace_size);
    void writeCompressedTrace(uint8* raw_data, std::string file,
                              uint64 uncompressed_trace_size);

  private:
    // configuration parameters (static: one Ruby system per process)
    static int m_random_seed;
    static bool m_randomization;
    static Tick m_clock;
    static int m_block_size_bytes;
    static int m_block_size_bits;
    static uint64 m_memory_size_bytes;
    static int m_memory_size_bits;
    static Network* m_network_ptr;

  public:
    static Profiler* m_profiler_ptr;
    static MemoryVector* m_mem_vec_ptr;
    std::vector<AbstractController*> m_abs_cntrl_vec;
    // warmup: replaying a cache trace after unserialize;
    // cooldown: flushing caches before serialize
    bool m_warmup_enabled;
    bool m_cooldown_enabled;
    CacheRecorder* m_cache_recorder;
    std::vector<SparseMemory*> m_sparse_memory_vector;
};
|
||||
|
||||
// Stream insertion for RubySystem; the detailed print is disabled and
// only a flush is performed.
inline std::ostream&
operator<<(std::ostream& out, const RubySystem& obj)
{
    //obj.print(out);
    return out << std::flush;
}
|
||||
|
||||
class RubyExitCallback : public Callback
|
||||
{
|
||||
private:
|
||||
std::string stats_filename;
|
||||
|
||||
public:
|
||||
virtual ~RubyExitCallback() {}
|
||||
|
||||
RubyExitCallback(const std::string& _stats_filename)
|
||||
{
|
||||
stats_filename = _stats_filename;
|
||||
}
|
||||
|
||||
virtual void process();
|
||||
};
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_SYSTEM_HH__
|
||||
133
simulators/gem5/src/mem/ruby/system/TBETable.hh
Normal file
133
simulators/gem5/src/mem/ruby/system/TBETable.hh
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_TBETABLE_HH__
|
||||
#define __MEM_RUBY_SYSTEM_TBETABLE_HH__
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#include "base/hashmap.hh"
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
#include "mem/ruby/common/Global.hh"
|
||||
#include "mem/ruby/profiler/Profiler.hh"
|
||||
#include "mem/ruby/system/System.hh"
|
||||
|
||||
// Fixed-capacity table of Transaction Buffer Entries (TBEs), keyed by
// line address.  Used by SLICC-generated controllers to track
// outstanding transactions; capacity is enforced by assertions in
// allocate()/isPresent().
template<class ENTRY>
class TBETable
{
  public:
    TBETable(int number_of_TBEs)
        : m_number_of_TBEs(number_of_TBEs)
    {
    }

    void
    printConfig(std::ostream& out)
    {
        out << "TBEs_per_TBETable: " << m_number_of_TBEs << std::endl;
    }

    bool isPresent(const Address& address) const;
    void allocate(const Address& address);
    void deallocate(const Address& address);
    // NOTE(review): m_map.size() is unsigned, so the subtraction is
    // performed in unsigned arithmetic; if size ever exceeded
    // m_number_of_TBEs this would wrap and return true.  The asserts in
    // allocate()/isPresent() keep size <= m_number_of_TBEs — confirm.
    bool
    areNSlotsAvailable(int n) const
    {
        return (m_number_of_TBEs - m_map.size()) >= n;
    }

    // Returns a pointer to the entry for address, or NULL if absent.
    ENTRY* lookup(const Address& address);

    // Print cache contents
    void print(std::ostream& out) const;

  private:
    // Private copy constructor and assignment operator
    TBETable(const TBETable& obj);
    TBETable& operator=(const TBETable& obj);

    // Data Members (m_prefix)
    m5::hash_map<Address, ENTRY> m_map;

  private:
    int m_number_of_TBEs;
};
|
||||
|
||||
template<class ENTRY>
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& out, const TBETable<ENTRY>& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
out << std::flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
template<class ENTRY>
|
||||
inline bool
|
||||
TBETable<ENTRY>::isPresent(const Address& address) const
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
assert(m_map.size() <= m_number_of_TBEs);
|
||||
return !!m_map.count(address);
|
||||
}
|
||||
|
||||
// Allocate a default-constructed entry for address.  The address must
// not already be present and the table must not be full.
template<class ENTRY>
inline void
TBETable<ENTRY>::allocate(const Address& address)
{
    assert(!isPresent(address));
    assert(m_map.size() < m_number_of_TBEs);
    // operator[] default-constructs the new ENTRY in place
    m_map[address] = ENTRY();
}
|
||||
|
||||
// Remove the entry for address; it must be present.
template<class ENTRY>
inline void
TBETable<ENTRY>::deallocate(const Address& address)
{
    assert(isPresent(address));
    assert(m_map.size() > 0);
    m_map.erase(address);
}
|
||||
|
||||
// looks an address up in the cache
|
||||
template<class ENTRY>
|
||||
inline ENTRY*
|
||||
TBETable<ENTRY>::lookup(const Address& address)
|
||||
{
|
||||
if(m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
// Intentionally empty: TBE contents are not printed.
template<class ENTRY>
inline void
TBETable<ENTRY>::print(std::ostream& out) const
{
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_TBETABLE_HH__
|
||||
125
simulators/gem5/src/mem/ruby/system/TimerTable.cc
Normal file
125
simulators/gem5/src/mem/ruby/system/TimerTable.cc
Normal file
@ -0,0 +1,125 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "mem/ruby/common/Global.hh"
|
||||
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
|
||||
#include "mem/ruby/system/TimerTable.hh"
|
||||
|
||||
// Construct an empty timer table with no consumer attached and the
// cached "next ready" state marked invalid.
TimerTable::TimerTable()
    : m_next_valid(false),
      m_next_time(0),
      m_next_address(Address(0)),
      m_consumer_ptr(NULL)
{
}
|
||||
|
||||
// True if some timer has expired (its ready time is at or before the
// current cycle).  Lazily recomputes the cached earliest entry; the
// cache members are mutable so this const method can refresh them.
bool
TimerTable::isReady() const
{
    if (m_map.empty())
        return false;

    if (!m_next_valid) {
        updateNext();
    }
    assert(m_next_valid);
    return (g_eventQueue_ptr->getTime() >= m_next_time);
}
|
||||
|
||||
// Address of the earliest-expiring timer; only valid when isReady().
const Address&
TimerTable::readyAddress() const
{
    assert(isReady());

    // isReady() refreshes the cache too, but the assert above compiles
    // away under NDEBUG, so refresh explicitly here as well.
    if (!m_next_valid) {
        updateNext();
    }
    assert(m_next_valid);
    return m_next_address;
}
|
||||
|
||||
void
|
||||
TimerTable::set(const Address& address, Time relative_latency)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
assert(relative_latency > 0);
|
||||
assert(!m_map.count(address));
|
||||
Time ready_time = g_eventQueue_ptr->getTime() + relative_latency;
|
||||
m_map[address] = ready_time;
|
||||
assert(m_consumer_ptr != NULL);
|
||||
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, ready_time);
|
||||
m_next_valid = false;
|
||||
|
||||
// Don't always recalculate the next ready address
|
||||
if (ready_time <= m_next_time) {
|
||||
m_next_valid = false;
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel the timer for a (line-aligned) address; it must be set.
void
TimerTable::unset(const Address& address)
{
    assert(address == line_address(address));
    assert(m_map.count(address));
    m_map.erase(address);

    // Don't always recalculate the next ready address: only needed if
    // the removed entry was the cached earliest one.
    if (address == m_next_address) {
        m_next_valid = false;
    }
}
|
||||
|
||||
// Intentionally empty: timer contents are not printed.
void
TimerTable::print(std::ostream& out) const
{
}
|
||||
|
||||
void
|
||||
TimerTable::updateNext() const
|
||||
{
|
||||
if (m_map.empty()) {
|
||||
assert(!m_next_valid);
|
||||
return;
|
||||
}
|
||||
|
||||
AddressMap::const_iterator i = m_map.begin();
|
||||
AddressMap::const_iterator end = m_map.end();
|
||||
|
||||
m_next_address = i->first;
|
||||
m_next_time = i->second;
|
||||
++i;
|
||||
|
||||
for (; i != end; ++i) {
|
||||
if (i->second < m_next_time) {
|
||||
m_next_address = i->first;
|
||||
m_next_time = i->second;
|
||||
}
|
||||
}
|
||||
|
||||
m_next_valid = true;
|
||||
}
|
||||
96
simulators/gem5/src/mem/ruby/system/TimerTable.hh
Normal file
96
simulators/gem5/src/mem/ruby/system/TimerTable.hh
Normal file
@ -0,0 +1,96 @@
|
||||
/*
|
||||
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_TIMERTABLE_HH__
|
||||
#define __MEM_RUBY_SYSTEM_TIMERTABLE_HH__
|
||||
|
||||
#include <cassert>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
#include "mem/ruby/common/Global.hh"
|
||||
|
||||
class Consumer;
|
||||
|
||||
// Per-controller table of address-keyed timers.  A consumer registers
// itself once; set() arms a timer and schedules a wakeup, isReady()/
// readyAddress() expose the earliest expired entry.  The "next ready"
// entry is cached lazily in mutable members.
class TimerTable
{
  public:
    TimerTable();

    static void printConfig(std::ostream& out) {}

    // Must be called exactly once before any set().
    void
    setConsumer(Consumer* consumer_ptr)
    {
        assert(m_consumer_ptr == NULL);
        m_consumer_ptr = consumer_ptr;
    }

    void
    setDescription(const std::string& name)
    {
        m_name = name;
    }

    bool isReady() const;
    const Address& readyAddress() const;
    bool isSet(const Address& address) const { return !!m_map.count(address); }
    void set(const Address& address, Time relative_latency);
    void unset(const Address& address);
    void print(std::ostream& out) const;

  private:
    // Refresh the cached earliest entry; const because it only touches
    // the mutable cache members.
    void updateNext() const;

    // Private copy constructor and assignment operator
    TimerTable(const TimerTable& obj);
    TimerTable& operator=(const TimerTable& obj);

    // Data Members (m_prefix)

    // use a std::map for the address map as this container is sorted
    // and ensures a well-defined iteration order
    typedef std::map<Address, Time> AddressMap;
    AddressMap m_map;
    mutable bool m_next_valid;
    mutable Time m_next_time; // Only valid if m_next_valid is true
    mutable Address m_next_address; // Only valid if m_next_valid is true
    Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
    std::string m_name;
};
|
||||
|
||||
// Stream insertion for a TimerTable: delegate to print() and flush.
inline std::ostream&
operator<<(std::ostream& out, const TimerTable& obj)
{
    obj.print(out);
    return out << std::flush;
}
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_TIMERTABLE_HH__
|
||||
171
simulators/gem5/src/mem/ruby/system/WireBuffer.cc
Normal file
171
simulators/gem5/src/mem/ruby/system/WireBuffer.cc
Normal file
@ -0,0 +1,171 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Advanced Micro Devices, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Author: Lisa Hsu
|
||||
*
|
||||
*/
|
||||
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
|
||||
#include "base/cprintf.hh"
|
||||
#include "base/stl_helpers.hh"
|
||||
#include "mem/ruby/system/WireBuffer.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
class Consumer;
|
||||
|
||||
|
||||
// Output operator definition
|
||||
|
||||
// Output operator definition: delegate to print() and flush.
ostream&
operator<<(ostream& out, const WireBuffer& obj)
{
    obj.print(out);
    return out << flush;
}
|
||||
|
||||
|
||||
// ****************************************************************
|
||||
|
||||
// CONSTRUCTOR
|
||||
// CONSTRUCTOR
WireBuffer::WireBuffer(const Params *p)
    : SimObject(p)
{
    m_msg_counter = 0;
    // enqueue() and recycle() read m_consumer_ptr (enqueue panics when
    // it is NULL); the original left it uninitialized, which is
    // undefined behavior if a message arrives before setConsumer().
    m_consumer_ptr = NULL;
}
|
||||
|
||||
// SimObject init hook; nothing extra to set up for a WireBuffer.
void
WireBuffer::init()
{
}
|
||||
|
||||
// No owned resources to release; queued MsgPtrs clean up themselves.
WireBuffer::~WireBuffer()
{
}
|
||||
|
||||
// Append a message that becomes visible `latency` cycles from now and
// schedule the consumer's wakeup at that time.
void
WireBuffer::enqueue(MsgPtr message, int latency)
{
    // monotonically increasing counter: preserves FIFO order among
    // messages with equal arrival times
    m_msg_counter++;
    Time current_time = g_eventQueue_ptr->getTime();
    Time arrival_time = current_time + latency;
    assert(arrival_time > current_time);
    MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
    // NOTE(review): nodes are appended with push_back and no push_heap;
    // presumably arrival times are non-decreasing so the vector stays
    // ordered (this class deliberately forbids reordering) — confirm
    // against dequeue()/recycle(), which use pop_heap/push_heap.
    m_message_queue.push_back(thisNode);
    if (m_consumer_ptr != NULL) {
        g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
    } else {
        panic("No Consumer for WireBuffer! %s\n", *this);
    }
}
|
||||
|
||||
// Remove the front (ready) message from the queue.
void
WireBuffer::dequeue()
{
    assert(isReady());
    // pop_heap with greater<> moves the smallest node to the back,
    // which pop_back then removes.
    pop_heap(m_message_queue.begin(), m_message_queue.end(),
             greater<MessageBufferNode>());
    m_message_queue.pop_back();
}
|
||||
|
||||
// Raw pointer to the front message without removing it; the queue
// retains ownership via its MsgPtr.
const Message*
WireBuffer::peek()
{
    Message* msg_ptr = peekNode().m_msgptr.get();
    assert(msg_ptr != NULL);
    return msg_ptr;
}
|
||||
|
||||
// Copy of the front queue node; the buffer must have a ready message.
MessageBufferNode
WireBuffer::peekNode()
{
    assert(isReady());
    return m_message_queue.front();
}
|
||||
|
||||
// Re-enqueue the front message one cycle in the future and schedule a
// wakeup then.
void
WireBuffer::recycle()
{
    // Because you don't want anything reordered, make sure the recycle latency
    // is just 1 cycle. As a result, you really want to use this only in
    // Wire-like situations because you don't want to deadlock as a result of
    // being stuck behind something if you're not actually supposed to.
    assert(isReady());
    MessageBufferNode node = m_message_queue.front();
    // pop the front node to the back slot...
    pop_heap(m_message_queue.begin(), m_message_queue.end(),
             greater<MessageBufferNode>());
    // ...overwrite it with a copy stamped one cycle later, and push it
    // back into heap position
    node.m_time = g_eventQueue_ptr->getTime() + 1;
    m_message_queue.back() = node;
    push_heap(m_message_queue.begin(), m_message_queue.end(),
              greater<MessageBufferNode>());
    g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr,
                                            g_eventQueue_ptr->getTime() + 1);
}
|
||||
|
||||
bool
|
||||
WireBuffer::isReady()
|
||||
{
|
||||
return ((!m_message_queue.empty()) &&
|
||||
(m_message_queue.front().m_time <= g_eventQueue_ptr->getTime()));
|
||||
}
|
||||
|
||||
// Intentionally empty: buffer contents are not printed.
void
WireBuffer::print(ostream& out) const
{
}
|
||||
|
||||
// Intentionally empty: a WireBuffer has no configuration to report.
void
WireBuffer::printConfig(ostream& out)
{
}
|
||||
|
||||
// Intentionally empty: no statistics are collected.
void
WireBuffer::clearStats() const
{
}
|
||||
|
||||
// Intentionally empty: no statistics are collected.
void
WireBuffer::printStats(ostream& out) const
{
}
|
||||
|
||||
// Intentionally empty: consumers poll the buffer via isReady()/peek().
void
WireBuffer::wakeup()
{
}
|
||||
|
||||
// Standard gem5 param-object factory for WireBuffer.
WireBuffer *
RubyWireBufferParams::create()
{
    WireBuffer *buf = new WireBuffer(this);
    return buf;
}
|
||||
|
||||
106
simulators/gem5/src/mem/ruby/system/WireBuffer.hh
Normal file
106
simulators/gem5/src/mem/ruby/system/WireBuffer.hh
Normal file
@ -0,0 +1,106 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Advanced Micro Devices, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Author: Lisa Hsu
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_SYSTEM_WIREBUFFER_HH__
|
||||
#define __MEM_RUBY_SYSTEM_WIREBUFFER_HH__
|
||||
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "mem/ruby/buffers/MessageBufferNode.hh"
|
||||
#include "mem/ruby/common/Global.hh"
|
||||
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
|
||||
#include "params/RubyWireBuffer.hh"
|
||||
#include "sim/sim_object.hh"
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// This object was written to literally mimic a Wire in Ruby, in the sense
|
||||
// that there is no way for messages to get reordered en route on the WireBuffer.
|
||||
// With Message Buffers, even if randomization is off and ordered is on,
|
||||
// messages can arrive in different orders than they were sent because of
|
||||
// network issues. This mimics a Wire, which makes such reordering impossible. This can
|
||||
// allow for messages between closely coupled controllers that are not actually
|
||||
// separated by a network in real systems to simplify coherence.
|
||||
/////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
class Consumer;
|
||||
class Message;
|
||||
|
||||
class WireBuffer : public SimObject
|
||||
{
|
||||
public:
|
||||
typedef RubyWireBufferParams Params;
|
||||
WireBuffer(const Params *p);
|
||||
void init();
|
||||
|
||||
~WireBuffer();
|
||||
|
||||
void wakeup();
|
||||
|
||||
void setConsumer(Consumer* consumer_ptr)
|
||||
{
|
||||
m_consumer_ptr = consumer_ptr;
|
||||
}
|
||||
Consumer* getConsumer() { return m_consumer_ptr; };
|
||||
void setDescription(const std::string& name) { m_description = name; };
|
||||
std::string getDescription() { return m_description; };
|
||||
|
||||
void enqueue(MsgPtr message, int latency );
|
||||
void dequeue();
|
||||
const Message* peek();
|
||||
MessageBufferNode peekNode();
|
||||
void recycle();
|
||||
bool isReady();
|
||||
bool areNSlotsAvailable(int n) { return true; }; // infinite queue length
|
||||
|
||||
void printConfig(std::ostream& out);
|
||||
void print(std::ostream& out) const;
|
||||
void clearStats() const;
|
||||
void printStats(std::ostream& out) const;
|
||||
|
||||
uint64_t m_msg_counter;
|
||||
|
||||
private:
|
||||
// Private copy constructor and assignment operator
|
||||
WireBuffer (const WireBuffer& obj);
|
||||
WireBuffer& operator=(const WireBuffer& obj);
|
||||
|
||||
// data members
|
||||
Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
|
||||
std::string m_description;
|
||||
|
||||
// queues where memory requests live
|
||||
std::vector<MessageBufferNode> m_message_queue;
|
||||
|
||||
};
|
||||
|
||||
#endif // __MEM_RUBY_SYSTEM_WIREBUFFER_HH__
|
||||
34
simulators/gem5/src/mem/ruby/system/WireBuffer.py
Normal file
34
simulators/gem5/src/mem/ruby/system/WireBuffer.py
Normal file
@ -0,0 +1,34 @@
|
||||
# Copyright (c) 2010 Advanced Micro Devices, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met: redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer;
|
||||
# redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution;
|
||||
# neither the name of the copyright holders nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Author: Lisa Hsu
|
||||
|
||||
from m5.params import *
|
||||
from m5.SimObject import SimObject
|
||||
|
||||
class RubyWireBuffer(SimObject):
    """Python-side SimObject descriptor for the C++ WireBuffer model."""
    # These strings are contract values consumed by the gem5 build/param
    # machinery and must match the C++ side exactly.
    cxx_class = 'WireBuffer'
    type = 'RubyWireBuffer'
|
||||
Reference in New Issue
Block a user