Adding gem5 source to svn.

git-svn-id: https://www4.informatik.uni-erlangen.de/i4svn/danceos/trunk/devel/fail@1819 8c4709b5-6ec9-48aa-a5cd-a96041d1645a
Author: friemel
Date: 2012-10-24 19:18:57 +00:00
Parent: f7ff71bd46
Commit: b41eec3f65
3222 changed files with 658579 additions and 1 deletion


@@ -0,0 +1,966 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
machine(L1Cache, "MESI Directory L1 Cache CMP")
: Sequencer * sequencer,
CacheMemory * L1IcacheMemory,
CacheMemory * L1DcacheMemory,
int l2_select_num_bits,
int l1_request_latency = 2,
int l1_response_latency = 2,
int to_l2_latency = 1,
bool send_evictions
{
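// Configuration parameters: the latencies are in cycles, l2_select_num_bits
// sizes the L2 bank-select field (see l2_select_low_bit below), and
// send_evictions, when set, reports L1 evictions back to the core (used,
// e.g., by CPU models that must squash speculative loads on eviction).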
// NODE L1 CACHE
// From this node's L1 cache TO the network
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
// a local L1 -> this L2 bank
MessageBuffer responseFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="response";
MessageBuffer unblockFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
// To this node's L1 cache FROM the network
// a L2 bank -> this L1
MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
// a L2 bank -> this L1
MessageBuffer responseToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="response";
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
NP, AccessPermission:Invalid, desc="Not present in either cache";
I, AccessPermission:Invalid, desc="an L1 cache entry in Idle";
S, AccessPermission:Read_Only, desc="an L1 cache entry in Shared";
E, AccessPermission:Read_Only, desc="an L1 cache entry in Exclusive";
M, AccessPermission:Read_Write, desc="an L1 cache entry in Modified", format="!b";
// Transient States
IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
SM, AccessPermission:Read_Only, desc="L1 shared, issued GETX (upgrade), have not seen response yet";
IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
M_I, AccessPermission:Busy, desc="L1 replacing, waiting for WB_Ack";
SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";
}
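// Naming convention for the transient states: XY reads "was X, heading to
// Y", e.g. IS is an Idle block with a GETS outstanding. The _I suffix
// (IS_I, M_I) marks a transaction that will end in I, e.g. because an Inv
// raced ahead of the data or a writeback is draining.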
// EVENTS
enumeration(Event, desc="Cache events") {
// L1 events
Load, desc="Load request from the home processor";
Ifetch, desc="I-fetch request from the home processor";
Store, desc="Store request from the home processor";
Inv, desc="Invalidate request from L2 bank";
// internally generated requests
L1_Replacement, desc="L1 Replacement", format="!r";
// other requests
Fwd_GETX, desc="GETX from other processor";
Fwd_GETS, desc="GETS from other processor";
Fwd_GET_INSTR, desc="GET_INSTR from other processor";
Data, desc="Data for processor";
Data_Exclusive, desc="Exclusive data for processor";
DataS_fromL1, desc="data for GETS request, need to unblock directory";
Data_all_Acks, desc="Data for processor, all acks";
Ack, desc="Ack for processor";
Ack_all, desc="Last ack for processor";
WB_Ack, desc="Ack for replacement";
}
// TYPES
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
State CacheState, desc="cache state";
DataBlock DataBlk, desc="data for the block";
bool Dirty, default="false", desc="data is dirty";
}
// TBE fields
structure(TBE, desc="...") {
Address Address, desc="Physical address for this TBE";
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="Buffer for the data block";
bool Dirty, default="false", desc="data is dirty";
bool isPrefetch, desc="Set if this was caused by a prefetch";
int pendingAcks, default="0", desc="number of pending acks";
}
structure(TBETable, external="yes") {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
MessageBuffer mandatoryQueue, ordered="false";
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
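// L2 bank selection: mapAddressToRange (used in the actions below) picks
// the destination bank from address bits
// [l2_select_low_bit + l2_select_num_bits - 1 : l2_select_low_bit].
// Worked example, assuming 64-byte blocks (l2_select_low_bit = 6) and four
// banks (l2_select_num_bits = 2): bank = (address >> 6) & 0x3.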
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
void unset_tbe();
void wakeUpBuffers(Address a);
// inclusive cache returns L1 entries only
Entry getCacheEntry(Address addr), return_by_pointer="yes" {
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory[addr]);
if(is_valid(L1Dcache_entry)) {
return L1Dcache_entry;
}
Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory[addr]);
return L1Icache_entry;
}
Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory[addr]);
return L1Dcache_entry;
}
Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory[addr]);
return L1Icache_entry;
}
State getState(TBE tbe, Entry cache_entry, Address addr) {
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
if(is_valid(tbe)) {
return tbe.TBEState;
} else if (is_valid(cache_entry)) {
return cache_entry.CacheState;
}
return State:NP;
}
void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
// MUST CHANGE
if(is_valid(tbe)) {
tbe.TBEState := state;
}
if (is_valid(cache_entry)) {
cache_entry.CacheState := state;
}
}
AccessPermission getAccessPermission(Address addr) {
TBE tbe := L1_TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
}
Entry cache_entry := getCacheEntry(addr);
if(is_valid(cache_entry)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
return L1Cache_State_to_permission(cache_entry.CacheState);
}
DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
return AccessPermission:NotPresent;
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
return getCacheEntry(addr).DataBlk;
}
void setAccessPermission(Entry cache_entry, Address addr, State state) {
if (is_valid(cache_entry)) {
cache_entry.changePermission(L1Cache_State_to_permission(state));
}
}
Event mandatory_request_type_to_event(RubyRequestType type) {
if (type == RubyRequestType:LD) {
return Event:Load;
} else if (type == RubyRequestType:IFETCH) {
return Event:Ifetch;
} else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
return Event:Store;
} else {
error("Invalid RubyRequestType");
}
}
int getPendingAcks(TBE tbe) {
return tbe.pendingAcks;
}
out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
// Response IntraChip L1 Network - response msg to this L1 cache
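// The rank parameters below give responses (rank 2) polling priority over
// forwarded requests (rank 1) and new processor requests (rank 0), so
// in-flight transactions drain before new work is admitted.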
in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
if (responseIntraChipL1Network_in.isReady()) {
peek(responseIntraChipL1Network_in, ResponseMsg, block_on="Address") {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.Address);
TBE tbe := L1_TBEs[in_msg.Address];
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.Address, cache_entry, tbe);
} else if(in_msg.Type == CoherenceResponseType:DATA) {
if ((getState(tbe, cache_entry, in_msg.Address) == State:IS ||
getState(tbe, cache_entry, in_msg.Address) == State:IS_I) &&
machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
trigger(Event:DataS_fromL1, in_msg.Address, cache_entry, tbe);
} else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
trigger(Event:Data_all_Acks, in_msg.Address, cache_entry, tbe);
} else {
trigger(Event:Data, in_msg.Address, cache_entry, tbe);
}
} else if (in_msg.Type == CoherenceResponseType:ACK) {
if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
trigger(Event:Ack_all, in_msg.Address, cache_entry, tbe);
} else {
trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
}
} else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
trigger(Event:WB_Ack, in_msg.Address, cache_entry, tbe);
} else {
error("Invalid L1 response type");
}
}
}
}
// Request IntraChip L1 Network - requests to this L1 cache forwarded from the shared L2
in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
if(requestIntraChipL1Network_in.isReady()) {
peek(requestIntraChipL1Network_in, RequestMsg, block_on="Address") {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.Address);
TBE tbe := L1_TBEs[in_msg.Address];
if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
// a forwarded UPGRADE is treated as a GETX here due to a race
trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
trigger(Event:Fwd_GET_INSTR, in_msg.Address, cache_entry, tbe);
} else {
error("Invalid forwarded request type");
}
}
}
}
// Mandatory queue between the node's CPU and its L1 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
if (in_msg.Type == RubyRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1; replace it there before fetching into this one
trigger(Event:L1_Replacement, in_msg.LineAddress,
L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
}
if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so replace a victim block to make room
trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
// *** DATA ACCESS ***
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1; replace it there before fetching into this one
trigger(Event:L1_Replacement, in_msg.LineAddress,
L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
}
if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so replace a victim block to make room
trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
}
}
}
}
}
}
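// The port above distinguishes four cases per request: (1) hit in the
// matching L1 => issue the demand event; (2) block resident in the *other*
// L1 => trigger L1_Replacement there first; (3) miss with a free way =>
// issue the demand event; (4) miss with the set full => trigger
// L1_Replacement on the victim named by cacheProbe.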
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
DPRINTF(RubySlicc, "address: %s, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GET_INSTR;
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
DPRINTF(RubySlicc, "address: %s, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(b_issueGETX, "b", desc="Issue GETX") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
DPRINTF(RubySlicc, "%s\n", machineID);
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
DPRINTF(RubySlicc, "address: %s, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:UPGRADE;
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
DPRINTF(RubySlicc, "address: %s, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(d_sendDataToRequestor, "d", desc="send data to requestor") {
peek(requestIntraChipL1Network_in, RequestMsg) {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
assert(is_valid(cache_entry));
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := cache_entry.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
assert(is_valid(cache_entry));
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := cache_entry.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
peek(requestIntraChipL1Network_in, RequestMsg) {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
assert(is_valid(tbe));
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := tbe.DataBlk;
out_msg.Dirty := tbe.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
assert(is_valid(tbe));
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := tbe.DataBlk;
out_msg.Dirty := tbe.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
peek(requestIntraChipL1Network_in, RequestMsg) {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
assert(is_valid(cache_entry));
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := cache_entry.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
out_msg.MessageSize := MessageSizeType:Writeback_Data;
}
}
action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
assert(is_valid(tbe));
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := tbe.DataBlk;
out_msg.Dirty := tbe.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
out_msg.MessageSize := MessageSizeType:Writeback_Data;
}
}
action(fi_sendInvAck, "fi", desc="send an invalidate ack to the requestor") {
peek(requestIntraChipL1Network_in, RequestMsg) {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Control;
out_msg.AckCount := 1;
}
}
}
action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
if (send_evictions) {
DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
sequencer.evictionCallback(address);
}
}
action(g_issuePUTX, "g", desc="issue PUTX (writeback) to the L2 cache") {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_response_latency) {
assert(is_valid(cache_entry));
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := cache_entry.Dirty;
out_msg.Requestor:= machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
if (cache_entry.Dirty) {
out_msg.MessageSize := MessageSizeType:Writeback_Data;
} else {
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
enqueue(unblockNetwork_out, ResponseMsg, latency=to_l2_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
out_msg.MessageSize := MessageSizeType:Response_Control;
DPRINTF(RubySlicc, "%s\n", address);
}
}
action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
enqueue(unblockNetwork_out, ResponseMsg, latency=to_l2_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
out_msg.MessageSize := MessageSizeType:Response_Control;
DPRINTF(RubySlicc, "%s\n", address);
}
}
action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
sequencer.readCallback(address, cache_entry.DataBlk);
}
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
check_allocate(L1_TBEs);
assert(is_valid(cache_entry));
L1_TBEs.allocate(address);
set_tbe(L1_TBEs[address]);
tbe.isPrefetch := false;
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
mandatoryQueue_in.dequeue();
}
action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
}
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
L1_TBEs.deallocate(address);
unset_tbe();
}
action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
peek(responseIntraChipL1Network_in, ResponseMsg) {
assert(is_valid(cache_entry));
cache_entry.DataBlk := in_msg.DataBlk;
cache_entry.Dirty := in_msg.Dirty;
}
}
action(q_updateAckCount, "q", desc="Update ack count") {
peek(responseIntraChipL1Network_in, ResponseMsg) {
assert(is_valid(tbe));
tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
APPEND_TRANSITION_COMMENT(in_msg.AckCount);
APPEND_TRANSITION_COMMENT(" p: ");
APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
}
}
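// Ack accounting sketch (the L2 side lives in the suppressed L2cache file,
// so this is inferred from the checks above): the L2's DATA is assumed to
// carry AckCount = -(sharers to invalidate) while each sharer's ACK
// carries AckCount = 1 (see fi_sendInvAck), so pendingAcks - AckCount hits
// 0 exactly when data plus all acks have arrived, in any order. E.g., two
// sharers: DATA(-2), ACK(1), ACK(1) takes pendingAcks 0 -> 2 -> 1 -> 0.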
action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.deallocate(address);
} else {
L1IcacheMemory.deallocate(address);
}
unset_cache_entry();
}
action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
if (is_invalid(cache_entry)) {
set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
}
}
action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
if (is_invalid(cache_entry)) {
set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
}
}
action(z_stallAndWaitMandatoryQueue, "\z", desc="recycle L1 request queue") {
stall_and_wait(mandatoryQueue_in, address);
}
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
wakeUpBuffers(address);
}
action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
peek(mandatoryQueue_in, RubyRequest) {
L1IcacheMemory.profileMiss(in_msg);
}
}
action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
peek(mandatoryQueue_in, RubyRequest) {
L1DcacheMemory.profileMiss(in_msg);
}
}
//*****************************************************
// TRANSITIONS
//*****************************************************
// Transitions for Load/Store/Replacement/WriteBack from transient states
transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
z_stallAndWaitMandatoryQueue;
}
// Transitions from Idle
transition({NP,I}, L1_Replacement) {
ff_deallocateL1CacheBlock;
}
transition({NP,I}, Load, IS) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
a_issueGETS;
uu_profileDataMiss;
k_popMandatoryQueue;
}
transition({NP,I}, Ifetch, IS) {
pp_allocateL1ICacheBlock;
i_allocateTBE;
ai_issueGETINSTR;
uu_profileInstMiss;
k_popMandatoryQueue;
}
transition({NP,I}, Store, IM) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
b_issueGETX;
uu_profileDataMiss;
k_popMandatoryQueue;
}
transition({NP, I}, Inv) {
fi_sendInvAck;
l_popRequestQueue;
}
// Transitions from Shared
transition(S, {Load,Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(S, Store, SM) {
i_allocateTBE;
c_issueUPGRADE;
uu_profileDataMiss;
k_popMandatoryQueue;
}
transition(S, L1_Replacement, I) {
forward_eviction_to_cpu;
ff_deallocateL1CacheBlock;
}
transition(S, Inv, I) {
forward_eviction_to_cpu;
fi_sendInvAck;
l_popRequestQueue;
}
// Transitions from Exclusive
transition(E, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(E, Store, M) {
hh_store_hit;
k_popMandatoryQueue;
}
transition(E, L1_Replacement, M_I) {
// silent E replacement??
forward_eviction_to_cpu;
i_allocateTBE;
g_issuePUTX; // send data, but hold in case forwarded request
ff_deallocateL1CacheBlock;
}
transition(E, Inv, I) {
// don't send data
forward_eviction_to_cpu;
fi_sendInvAck;
l_popRequestQueue;
}
transition(E, Fwd_GETX, I) {
forward_eviction_to_cpu;
d_sendDataToRequestor;
l_popRequestQueue;
}
transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
d_sendDataToRequestor;
d2_sendDataToL2;
l_popRequestQueue;
}
// Transitions from Modified
transition(M, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(M, Store) {
hh_store_hit;
k_popMandatoryQueue;
}
transition(M, L1_Replacement, M_I) {
forward_eviction_to_cpu;
i_allocateTBE;
g_issuePUTX; // send data, but hold in case forwarded request
ff_deallocateL1CacheBlock;
}
transition(M_I, WB_Ack, I) {
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(M, Inv, I) {
forward_eviction_to_cpu;
f_sendDataToL2;
l_popRequestQueue;
}
transition(M_I, Inv, SINK_WB_ACK) {
ft_sendDataToL2_fromTBE;
l_popRequestQueue;
}
transition(M, Fwd_GETX, I) {
forward_eviction_to_cpu;
d_sendDataToRequestor;
l_popRequestQueue;
}
transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
d_sendDataToRequestor;
d2_sendDataToL2;
l_popRequestQueue;
}
transition(M_I, Fwd_GETX, SINK_WB_ACK) {
dt_sendDataToRequestor_fromTBE;
l_popRequestQueue;
}
transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, SINK_WB_ACK) {
dt_sendDataToRequestor_fromTBE;
d2t_sendDataToL2_fromTBE;
l_popRequestQueue;
}
// Transitions from IS
transition({IS, IS_I}, Inv, IS_I) {
fi_sendInvAck;
l_popRequestQueue;
}
transition(IS, Data_all_Acks, S) {
u_writeDataToL1Cache;
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(IS_I, Data_all_Acks, I) {
u_writeDataToL1Cache;
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(IS, DataS_fromL1, S) {
u_writeDataToL1Cache;
j_sendUnblock;
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(IS_I, DataS_fromL1, I) {
u_writeDataToL1Cache;
j_sendUnblock;
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
// directory is blocked when sending exclusive data
transition(IS_I, Data_Exclusive, E) {
u_writeDataToL1Cache;
h_load_hit;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(IS, Data_Exclusive, E) {
u_writeDataToL1Cache;
h_load_hit;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
// Transitions from IM
transition({IM, SM}, Inv, IM) {
fi_sendInvAck;
l_popRequestQueue;
}
transition(IM, Data, SM) {
u_writeDataToL1Cache;
q_updateAckCount;
o_popIncomingResponseQueue;
}
transition(IM, Data_all_Acks, M) {
u_writeDataToL1Cache;
hh_store_hit;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
// transitions from SM
transition({SM, IM}, Ack) {
q_updateAckCount;
o_popIncomingResponseQueue;
}
transition(SM, Ack_all, M) {
jj_sendExclusiveUnblock;
hh_store_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(SINK_WB_ACK, Inv) {
fi_sendInvAck;
l_popRequestQueue;
}
transition(SINK_WB_ACK, WB_Ack, I) {
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
}

File diff suppressed because it is too large.


@@ -0,0 +1,606 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
*/
// This file is copied from Yasuko Watanabe's prefetch / memory protocol
// Copied here by aep 12/14/07
machine(Directory, "MESI_CMP_filter_directory protocol")
: DirectoryMemory * directory,
MemoryControl * memBuffer,
int to_mem_ctrl_latency = 1,
int directory_latency = 6
{
MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type="request";
MessageBuffer responseToDir, network="From", virtual_network="1", ordered="false", vnet_type="response";
MessageBuffer requestFromDir, network="To", virtual_network="0", ordered="false", vnet_type="request";
MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false", vnet_type="response";
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
M, AccessPermission:Maybe_Stale, desc="memory copy may be stale, i.e. other modified copies may exist";
IM, AccessPermission:Busy, desc="Intermediate State I>M";
MI, AccessPermission:Busy, desc="Intermediate State M>I";
M_DRD, AccessPermission:Busy, desc="Intermediate state for a DMA read: owner invalidated, waiting for data";
M_DRDI, AccessPermission:Busy, desc="Intermediate state for a DMA read: data received, writeback to memory in flight";
M_DWR, AccessPermission:Busy, desc="Intermediate state for a DMA write: owner invalidated, waiting for data";
M_DWRI, AccessPermission:Busy, desc="Intermediate state for a DMA write: data received, waiting for memory ack";
}
// Events
enumeration(Event, desc="Directory events") {
Fetch, desc="A memory fetch arrives";
Data, desc="writeback data arrives";
Memory_Data, desc="Fetched data from memory arrives";
Memory_Ack, desc="Writeback Ack from memory arrives";
//added by SS for dma
DMA_READ, desc="A DMA Read memory request";
DMA_WRITE, desc="A DMA Write memory request";
CleanReplacement, desc="Clean Replacement in L2 cache";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...", interface="AbstractEntry") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
NetDest Sharers, desc="Sharers for this block";
NetDest Owner, desc="Owner of this block";
}
// TBE entries for DMA requests
structure(TBE, desc="TBE entries for outstanding DMA requests") {
Address PhysicalAddress, desc="physical address";
State TBEState, desc="Transient State";
DataBlock DataBlk, desc="Data to be written (DMA write only)";
int Len, desc="...";
}
structure(TBETable, external="yes") {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
// ** OBJECTS **
TBETable TBEs, template_hack="<Directory_TBE>";
void set_tbe(TBE tbe);
void unset_tbe();
void wakeUpBuffers(Address a);
Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
}
dir_entry := static_cast(Entry, "pointer",
directory.allocate(addr, new Entry));
return dir_entry;
}
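// Directory entries are allocated lazily: the first lookup of an address
// creates an entry in the default state I with empty Owner/Sharers sets,
// so cold misses need no special case.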
State getState(TBE tbe, Address addr) {
if (is_valid(tbe)) {
return tbe.TBEState;
} else if (directory.isPresent(addr)) {
return getDirectoryEntry(addr).DirectoryState;
} else {
return State:I;
}
}
void setState(TBE tbe, Address addr, State state) {
if (is_valid(tbe)) {
tbe.TBEState := state;
}
if (directory.isPresent(addr)) {
if (state == State:I) {
assert(getDirectoryEntry(addr).Owner.count() == 0);
assert(getDirectoryEntry(addr).Sharers.count() == 0);
} else if (state == State:M) {
assert(getDirectoryEntry(addr).Owner.count() == 1);
assert(getDirectoryEntry(addr).Sharers.count() == 0);
}
getDirectoryEntry(addr).DirectoryState := state;
}
}
AccessPermission getAccessPermission(Address addr) {
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
return Directory_State_to_permission(tbe.TBEState);
}
if(directory.isPresent(addr)) {
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
}
DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
return AccessPermission:NotPresent;
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
return getDirectoryEntry(addr).DataBlk;
}
void setAccessPermission(Address addr, State state) {
if (directory.isPresent(addr)) {
getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
}
}
bool isGETRequest(CoherenceRequestType type) {
return (type == CoherenceRequestType:GETS) ||
(type == CoherenceRequestType:GET_INSTR) ||
(type == CoherenceRequestType:GETX);
}
// ** OUT_PORTS **
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
trigger(Event:Fetch, in_msg.Address, TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
TBEs[makeLineAddress(in_msg.Address)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
TBEs[makeLineAddress(in_msg.Address)]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg);
error("Invalid message");
}
}
}
}
in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
trigger(Event:Data, in_msg.Address, TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:CleanReplacement, in_msg.Address, TBEs[in_msg.Address]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
}
}
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, memBuffer, rank = 2) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
}
}
// Actions
action(a_sendAck, "a", desc="Send ack to L2") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:MEMORY_ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Sender);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(d_sendData, "d", desc="Send data to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:MEMORY_DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
// Actions
action(aa_sendAck, "aa", desc="Send ack to L2") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:MEMORY_ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
requestNetwork_in.dequeue();
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
responseNetwork_in.dequeue();
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
memQueue_in.dequeue();
}
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
wakeUpBuffers(address);
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Sender;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
peek(responseNetwork_in, ResponseMsg) {
getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
in_msg.Address, in_msg.DataBlk);
}
}
//added by SS for dma
action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := machineID;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
requestNetwork_in.dequeue();
}
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
out_msg.Destination.add(map_Address_to_DMA(address));
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
peek(requestNetwork_in, RequestMsg) {
getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
}
}
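// copyPartial updates only the Len bytes starting at the DMA address's
// offset within the block, leaving the rest of the line intact. Worked
// example, assuming 64-byte blocks: an 8-byte DMA write to 0x1008 updates
// bytes [8..15] of the block at 0x1000.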
action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
peek(requestNetwork_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.OriginalRequestorMachId := machineID;
//out_msg.DataBlk := in_msg.DataBlk;
out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(address), in_msg.Len);
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Destination.add(map_Address_to_DMA(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(z_stallAndWaitRequest, "z", desc="recycle request queue") {
stall_and_wait(requestNetwork_in, address);
}
action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
requestNetwork_in.recycle();
}
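// Two flavors of back-pressure used above: stall_and_wait parks a message
// until an explicit wakeUpBuffers(address) releases it, while recycle()
// simply re-enqueues the message to be retried after the recycle latency.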
action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
peek(requestNetwork_in, RequestMsg) {
getDirectoryEntry(address).Owner.clear();
getDirectoryEntry(address).Owner.add(in_msg.Requestor);
}
}
action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:INV;
out_msg.Sender := machineID;
out_msg.Destination := getDirectoryEntry(address).Owner;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
out_msg.Destination.add(map_Address_to_DMA(address));
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(c_clearOwner, "c", desc="Clear the owner field") {
getDirectoryEntry(address).Owner.clear();
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(requestNetwork_in, RequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.Address;
tbe.Len := in_msg.Len;
}
}
action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
assert(is_valid(tbe));
//getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
}
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
assert(is_valid(tbe));
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.OriginalRequestorMachId := in_msg.Sender;
//out_msg.DataBlk := in_msg.DataBlk;
//out_msg.DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
TBEs.deallocate(address);
unset_tbe();
}
// TRANSITIONS
transition(I, Fetch, IM) {
qf_queueMemoryFetchRequest;
e_ownerIsRequestor;
j_popIncomingRequestQueue;
}
transition(IM, Memory_Data, M) {
d_sendData;
l_popMemQueue;
kd_wakeUpDependents;
}
//added by SS
transition(M, CleanReplacement, I) {
c_clearOwner;
a_sendAck;
k_popIncomingResponseQueue;
}
transition(M, Data, MI) {
m_writeDataToMemory;
qw_queueMemoryWBRequest;
k_popIncomingResponseQueue;
}
transition(MI, Memory_Ack, I) {
c_clearOwner;
aa_sendAck;
l_popMemQueue;
kd_wakeUpDependents;
}
//added by SS for dma support
transition(I, DMA_READ, ID) {
qf_queueMemoryFetchRequestDMA;
j_popIncomingRequestQueue;
}
transition(ID, Memory_Data, I) {
dr_sendDMAData;
l_popMemQueue;
kd_wakeUpDependents;
}
transition(I, DMA_WRITE, ID_W) {
dw_writeDMAData;
qw_queueMemoryWBRequest_partial;
j_popIncomingRequestQueue;
}
transition(ID_W, Memory_Ack, I) {
da_sendDMAAck;
l_popMemQueue;
kd_wakeUpDependents;
}
transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data}) {
z_stallAndWaitRequest;
}
transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ}) {
zz_recycleDMAQueue;
}
transition(M, DMA_READ, M_DRD) {
inv_sendCacheInvalidate;
j_popIncomingRequestQueue;
}
transition(M_DRD, Data, M_DRDI) {
drp_sendDMAData;
m_writeDataToMemory;
qw_queueMemoryWBRequest;
k_popIncomingResponseQueue;
}
transition(M_DRDI, Memory_Ack, I) {
aa_sendAck;
c_clearOwner;
l_popMemQueue;
kd_wakeUpDependents;
}
transition(M, DMA_WRITE, M_DWR) {
v_allocateTBE;
inv_sendCacheInvalidate;
j_popIncomingRequestQueue;
}
transition(M_DWR, Data, M_DWRI) {
m_writeDataToMemory;
qw_queueMemoryWBRequest_partialTBE;
k_popIncomingResponseQueue;
}
transition(M_DWRI, Memory_Ack, I) {
dwt_writeDMADataFromTBE;
aa_sendAck;
c_clearOwner;
da_sendDMAAck;
w_deallocateTBE;
l_popMemQueue;
kd_wakeUpDependents;
}
}


@@ -0,0 +1,146 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
int request_latency = 6
{
MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response", no_vector="true";
MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request", no_vector="true";
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a read request";
BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a write request";
}
enumeration(Event, desc="DMA events") {
ReadRequest, desc="A new read request";
WriteRequest, desc="A new write request";
Data, desc="Data from a DMA memory read";
Ack, desc="DMA write to memory completed";
}
structure(DMASequencer, external="yes") {
void ackCallback();
void dataCallback(DataBlock);
}
MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
State cur_state, no_vector="true";
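// The DMA sequencer has at most one request outstanding, so a single
// scalar state (cur_state) stands in for the usual per-address cache/TBE
// lookup; getState/setState below ignore the address entirely.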
State getState(Address addr) {
return cur_state;
}
void setState(Address addr, State state) {
cur_state := state;
}
AccessPermission getAccessPermission(Address addr) {
return AccessPermission:NotPresent;
}
void setAccessPermission(Address addr, State state) {
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
error("DMA does not support get data block.");
}
out_port(reqToDirectory_out, RequestMsg, reqToDirectory, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
} else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress);
} else {
error("Invalid request type");
}
}
}
}
in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
if (dmaResponseQueue_in.isReady()) {
peek(dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, makeLineAddress(in_msg.Address));
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, makeLineAddress(in_msg.Address));
} else {
error("Invalid response type");
}
}
}
}
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
out_msg.Address := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_READ;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
out_msg.Address := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_WRITE;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
dma_sequencer.ackCallback();
}
action(d_dataCallback, "d", desc="Write data to dma sequencer") {
peek(dmaResponseQueue_in, ResponseMsg) {
dma_sequencer.dataCallback(in_msg.DataBlk);
}
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
dmaRequestQueue_in.dequeue();
}
action(p_popResponseQueue, "\p", desc="Pop response queue") {
dmaResponseQueue_in.dequeue();
}
action(z_stall, "z", desc="DMA is busy; stall") {
// do nothing
}
transition(READY, ReadRequest, BUSY_RD) {
s_sendReadRequest;
p_popRequestQueue;
}
transition(READY, WriteRequest, BUSY_WR) {
s_sendWriteRequest;
p_popRequestQueue;
}
transition(BUSY_RD, Data, READY) {
d_dataCallback;
p_popResponseQueue;
}
transition(BUSY_WR, Ack, READY) {
a_ackCallback;
p_popResponseQueue;
}
}


@@ -0,0 +1,87 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
UPGRADE, desc="UPGRADE to exclusive";
GETS, desc="Get Shared";
GET_INSTR, desc="Get Instruction";
INV, desc="INValidate";
PUTX, desc="replacement message";
WB_ACK, desc="Writeback ack";
WB_NACK, desc="Writeback neg. ack";
FWD, desc="Generic FWD";
DMA_READ, desc="DMA Read";
DMA_WRITE, desc="DMA Write";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
MEMORY_ACK, desc="Ack from memory controller";
DATA, desc="Data";
DATA_EXCLUSIVE, desc="Exclusive data";
MEMORY_DATA, desc="Data from memory";
ACK, desc="Generic invalidate ack";
WB_ACK, desc="writeback ack";
UNBLOCK, desc="unblock";
EXCLUSIVE_UNBLOCK, desc="exclusive unblock";
INV, desc="Invalidate from directory";
}
// RequestMsg
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
RubyAccessMode AccessMode, desc="user/supervisor access type";
MachineID Requestor, desc="Which component made the request";
NetDest Destination, desc="What components receive the request, includes MachineType and num";
MessageSizeType MessageSize, desc="size category of the message";
DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
int Len, desc="Length of the request in bytes (DMA requests only)";
bool Dirty, default="false", desc="Dirty bit";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
// ResponseMsg
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="What component sent the data";
NetDest Destination, desc="Node to whom the data is sent";
DataBlock DataBlk, desc="Data for the cache line";
bool Dirty, default="false", desc="Dirty bit";
int AckCount, default="0", desc="number of acks in this message";
MessageSizeType MessageSize, desc="size category of the message";
}


@@ -0,0 +1,7 @@
protocol "MESI_CMP_directory";
include "RubySlicc_interfaces.slicc";
include "MESI_CMP_directory-msg.sm";
include "MESI_CMP_directory-L1cache.sm";
include "MESI_CMP_directory-L2cache.sm";
include "MESI_CMP_directory-dir.sm";
include "MESI_CMP_directory-dma.sm";


@@ -0,0 +1,481 @@
machine(L1Cache, "MI Example L1 Cache")
: Sequencer * sequencer,
CacheMemory * cacheMemory,
int cache_response_latency = 12,
int issue_latency = 2,
bool send_evictions
{
// NETWORK BUFFERS
MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true", vnet_type="request";
MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true", vnet_type="response";
MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true", vnet_type="forward";
MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true", vnet_type="response";
// STATES
state_declaration(State, desc="Cache states") {
I, AccessPermission:Invalid, desc="Not Present/Invalid";
II, AccessPermission:Busy, desc="Not Present/Invalid, issued PUT";
M, AccessPermission:Read_Write, desc="Modified";
MI, AccessPermission:Busy, desc="Modified, issued PUT";
MII, AccessPermission:Busy, desc="Modified, issued PUTX, received nack";
IS, AccessPermission:Busy, desc="Issued request for LOAD/IFETCH";
IM, AccessPermission:Busy, desc="Issued request for STORE/ATOMIC";
}
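// State naming mirrors transaction history: MI is an M block with a PUTX
// in flight, II an I block with a PUT outstanding, and MII a NACKed
// writeback, typically because a forwarded GETX overtook the PUTX.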
// EVENTS
enumeration(Event, desc="Cache events") {
// From processor
Load, desc="Load request from processor";
Ifetch, desc="Ifetch request from processor";
Store, desc="Store request from processor";
Data, desc="Data from network";
Fwd_GETX, desc="Forward from network";
Inv, desc="Invalidate request from dir";
Replacement, desc="Replace a block";
Writeback_Ack, desc="Ack from the directory for a writeback";
Writeback_Nack, desc="Nack from the directory for a writeback";
}
// STRUCTURE DEFINITIONS
MessageBuffer mandatoryQueue, ordered="false";
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") {
State CacheState, desc="cache state";
bool Dirty, desc="Is the data dirty (different than memory)?";
DataBlock DataBlk, desc="Data in the block";
}
// TBE fields
structure(TBE, desc="...") {
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
}
structure(TBETable, external="yes") {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
// STRUCTURES
TBETable TBEs, template_hack="<L1Cache_TBE>";
// PROTOTYPES
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE b);
void unset_tbe();
Entry getCacheEntry(Address address), return_by_pointer="yes" {
return static_cast(Entry, "pointer", cacheMemory.lookup(address));
}
// FUNCTIONS
Event mandatory_request_type_to_event(RubyRequestType type) {
if (type == RubyRequestType:LD) {
return Event:Load;
} else if (type == RubyRequestType:IFETCH) {
return Event:Ifetch;
} else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
return Event:Store;
} else {
error("Invalid RubyRequestType");
}
}
State getState(TBE tbe, Entry cache_entry, Address addr) {
if (is_valid(tbe)) {
return tbe.TBEState;
}
else if (is_valid(cache_entry)) {
return cache_entry.CacheState;
}
else {
return State:I;
}
}
void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
if (is_valid(tbe)) {
tbe.TBEState := state;
}
if (is_valid(cache_entry)) {
cache_entry.CacheState := state;
}
}
AccessPermission getAccessPermission(Address addr) {
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
Entry cache_entry := getCacheEntry(addr);
if(is_valid(cache_entry)) {
return L1Cache_State_to_permission(cache_entry.CacheState);
}
return AccessPermission:NotPresent;
}
void setAccessPermission(Entry cache_entry, Address addr, State state) {
if (is_valid(cache_entry)) {
cache_entry.changePermission(L1Cache_State_to_permission(state));
}
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
return getCacheEntry(addr).DataBlk;
}
GenericMachineType getNondirectHitMachType(MachineID sender) {
if (machineIDToMachineType(sender) == MachineType:L1Cache) {
//
// NOTE direct local hits should not call this
//
return GenericMachineType:L1Cache_wCC;
} else {
return ConvertMachToGenericMach(machineIDToMachineType(sender));
}
}
// NETWORK PORTS
out_port(requestNetwork_out, RequestMsg, requestFromCache);
out_port(responseNetwork_out, ResponseMsg, responseFromCache);
in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
if (forwardRequestNetwork_in.isReady()) {
peek(forwardRequestNetwork_in, RequestMsg, block_on="Address") {
Entry cache_entry := getCacheEntry(in_msg.Address);
TBE tbe := TBEs[in_msg.Address];
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
}
else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
}
else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
}
else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
}
else {
error("Unexpected message");
}
}
}
}
in_port(responseNetwork_in, ResponseMsg, responseToCache) {
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg, block_on="Address") {
Entry cache_entry := getCacheEntry(in_msg.Address);
TBE tbe := TBEs[in_msg.Address];
if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.Address, cache_entry, tbe);
}
else {
error("Unexpected message");
}
}
}
}
// Mandatory Queue
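// Dispatch: if the line is resident or a way is free, the CPU request maps
// directly to a Load/Ifetch/Store event; if the line is absent and its set
// is full, the request is instead turned into a Replacement of the victim
// returned by cacheProbe, freeing a way before the stalled miss retries.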
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
Entry cache_entry := getCacheEntry(in_msg.LineAddress);
if (is_invalid(cache_entry) &&
cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
// make room for the block
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
}
else {
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
cache_entry, TBEs[in_msg.LineAddress]);
}
}
}
}
// ACTIONS
action(a_issueRequest, "a", desc="Issue a request") {
enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Control;
}
}
action(b_issuePUT, "b", desc="Issue a PUT request") {
enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
assert(is_valid(cache_entry));
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
}
}
action(e_sendData, "e", desc="Send data from cache to requestor") {
peek(forwardRequestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
assert(is_valid(cache_entry));
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
peek(forwardRequestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
assert(is_valid(tbe));
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := tbe.DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
if (is_invalid(cache_entry)) {
set_cache_entry(cacheMemory.allocate(address, new Entry));
}
}
action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
if (is_valid(cache_entry)) {
cacheMemory.deallocate(address);
unset_cache_entry();
}
}
action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
mandatoryQueue_in.dequeue();
}
action(n_popResponseQueue, "n", desc="Pop the response queue") {
profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
}
action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
}
action(p_profileMiss, "p", desc="Profile cache miss") {
peek(mandatoryQueue_in, RubyRequest) {
cacheMemory.profileMiss(in_msg);
}
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
sequencer.readCallback(address,
GenericMachineType:L1Cache,
cache_entry.DataBlk);
}
action(rx_load_hit, "rx", desc="External load completed.") {
peek(responseNetwork_in, ResponseMsg) {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
sequencer.readCallback(address,
getNondirectHitMachType(in_msg.Sender),
cache_entry.DataBlk);
}
}
action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
sequencer.writeCallback(address,
GenericMachineType:L1Cache,
cache_entry.DataBlk);
}
action(sx_store_hit, "sx", desc="External store completed.") {
peek(responseNetwork_in, ResponseMsg) {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
sequencer.writeCallback(address,
getNondirectHitMachType(in_msg.Sender),
cache_entry.DataBlk);
}
}
action(u_writeDataToCache, "u", desc="Write data to the cache") {
peek(responseNetwork_in, ResponseMsg) {
assert(is_valid(cache_entry));
cache_entry.DataBlk := in_msg.DataBlk;
}
}
action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
if (send_evictions) {
DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
sequencer.evictionCallback(address);
}
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
TBEs.allocate(address);
set_tbe(TBEs[address]);
}
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
TBEs.deallocate(address);
unset_tbe();
}
action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
assert(is_valid(cache_entry));
assert(is_valid(tbe));
tbe.DataBlk := cache_entry.DataBlk;
}
action(z_stall, "z", desc="stall") {
// do nothing
}
// TRANSITIONS
transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
z_stall;
}
transition({IS, IM}, {Fwd_GETX, Inv}) {
z_stall;
}
transition(MI, Inv) {
o_popForwardedRequestQueue;
}
transition(M, Store) {
s_store_hit;
m_popMandatoryQueue;
}
transition(M, {Load, Ifetch}) {
r_load_hit;
m_popMandatoryQueue;
}
transition(I, Inv) {
o_popForwardedRequestQueue;
}
transition(I, Store, IM) {
v_allocateTBE;
i_allocateL1CacheBlock;
a_issueRequest;
p_profileMiss;
m_popMandatoryQueue;
}
transition(I, {Load, Ifetch}, IS) {
v_allocateTBE;
i_allocateL1CacheBlock;
a_issueRequest;
p_profileMiss;
m_popMandatoryQueue;
}
transition(IS, Data, M) {
u_writeDataToCache;
rx_load_hit;
w_deallocateTBE;
n_popResponseQueue;
}
transition(IM, Data, M) {
u_writeDataToCache;
sx_store_hit;
w_deallocateTBE;
n_popResponseQueue;
}
transition(M, Fwd_GETX, I) {
e_sendData;
forward_eviction_to_cpu;
o_popForwardedRequestQueue;
}
transition(I, Replacement) {
h_deallocateL1CacheBlock;
}
transition(M, {Replacement,Inv}, MI) {
v_allocateTBE;
b_issuePUT;
x_copyDataFromCacheToTBE;
forward_eviction_to_cpu;
h_deallocateL1CacheBlock;
}
transition(MI, Writeback_Ack, I) {
w_deallocateTBE;
o_popForwardedRequestQueue;
}
transition(MI, Fwd_GETX, II) {
ee_sendDataFromTBE;
o_popForwardedRequestQueue;
}
transition(MI, Writeback_Nack, MII) {
o_popForwardedRequestQueue;
}
transition(MII, Fwd_GETX, I) {
ee_sendDataFromTBE;
w_deallocateTBE;
o_popForwardedRequestQueue;
}
transition(II, Writeback_Nack, I) {
w_deallocateTBE;
o_popForwardedRequestQueue;
}
}
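The machine above reduces to a transition table keyed by (state, event). A minimal Python sketch of its core paths (illustrative only, not gem5 code; action lists are abbreviated from the actions above):

# (state, event) -> (next state, actions): a subset of the MI example
# L1 transitions above, for illustration.
TRANSITIONS = {
    ("I",  "Load"):          ("IS", ["allocate TBE", "allocate block", "issue GETX"]),
    ("I",  "Store"):         ("IM", ["allocate TBE", "allocate block", "issue GETX"]),
    ("IS", "Data"):          ("M",  ["write data", "load callback", "free TBE"]),
    ("IM", "Data"):          ("M",  ["write data", "store callback", "free TBE"]),
    ("M",  "Fwd_GETX"):      ("I",  ["send data to requestor", "notify eviction"]),
    ("M",  "Replacement"):   ("MI", ["allocate TBE", "issue PUTX", "copy data to TBE"]),
    ("MI", "Writeback_Ack"): ("I",  ["free TBE"]),
}

def step(state, event):
    # Pairs not in the table either stall (z_stall) or are protocol errors.
    return TRANSITIONS[(state, event)]

# A load miss whose line is later replaced:
state = "I"
for event in ("Load", "Data", "Replacement", "Writeback_Ack"):
    state, actions = step(state, event)
    print(event, "->", state, actions)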


@ -0,0 +1,632 @@
machine(Directory, "Directory protocol")
: DirectoryMemory * directory,
MemoryControl * memBuffer,
int directory_latency = 12
{
MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="true", vnet_type="request";
MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, AccessPermission:Read_Write, desc="Invalid";
M, AccessPermission:Invalid, desc="Modified";
M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";
M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";
IM, AccessPermission:Busy, desc="Intermediate state I-->M";
MI, AccessPermission:Busy, desc="Intermediate state M-->I";
ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
}
// Events
enumeration(Event, desc="Directory events") {
// processor requests
GETX, desc="A GETX arrives";
GETS, desc="A GETS arrives";
PUTX, desc="A PUTX arrives";
PUTX_NotOwner, desc="A PUTX arrives from a node that is not the owner";
// DMA requests
DMA_READ, desc="A DMA Read memory request";
DMA_WRITE, desc="A DMA Write memory request";
// Memory Controller
Memory_Data, desc="Fetched data from memory arrives";
Memory_Ack, desc="Writeback Ack from memory arrives";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...", interface="AbstractEntry") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
NetDest Sharers, desc="Sharers for this block";
NetDest Owner, desc="Owner of this block";
}
// TBE entries for DMA requests
structure(TBE, desc="TBE entries for outstanding DMA requests") {
Address PhysicalAddress, desc="physical address";
State TBEState, desc="Transient State";
DataBlock DataBlk, desc="Data to be written (DMA write only)";
int Len, desc="Length of the DMA transfer";
MachineID DmaRequestor, desc="DMA requestor";
}
structure(TBETable, external="yes") {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
// ** OBJECTS **
TBETable TBEs, template_hack="<Directory_TBE>";
void set_tbe(TBE b);
void unset_tbe();
Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
}
dir_entry := static_cast(Entry, "pointer",
directory.allocate(addr, new Entry));
return dir_entry;
}
State getState(TBE tbe, Address addr) {
if (is_valid(tbe)) {
return tbe.TBEState;
} else if (directory.isPresent(addr)) {
return getDirectoryEntry(addr).DirectoryState;
} else {
return State:I;
}
}
void setState(TBE tbe, Address addr, State state) {
if (is_valid(tbe)) {
tbe.TBEState := state;
}
if (directory.isPresent(addr)) {
if (state == State:M) {
assert(getDirectoryEntry(addr).Owner.count() == 1);
assert(getDirectoryEntry(addr).Sharers.count() == 0);
}
getDirectoryEntry(addr).DirectoryState := state;
if (state == State:I) {
assert(getDirectoryEntry(addr).Owner.count() == 0);
assert(getDirectoryEntry(addr).Sharers.count() == 0);
directory.invalidateBlock(addr);
}
}
}
AccessPermission getAccessPermission(Address addr) {
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
if(directory.isPresent(addr)) {
return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
}
return AccessPermission:NotPresent;
}
void setAccessPermission(Address addr, State state) {
if (directory.isPresent(addr)) {
getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
}
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
return getDirectoryEntry(addr).DataBlk;
}
// ** OUT_PORTS **
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
//added by SS
out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
} else if (in_msg.Type == DMARequestType:WRITE) {
trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
} else {
error("Invalid message");
}
}
}
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
TBE tbe := TBEs[in_msg.Address];
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Address, tbe);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
if (getDirectoryEntry(in_msg.Address).Owner.isElement(in_msg.Requestor)) {
trigger(Event:PUTX, in_msg.Address, tbe);
} else {
trigger(Event:PUTX_NotOwner, in_msg.Address, tbe);
}
} else {
error("Invalid message");
}
}
}
}
//added by SS
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, memBuffer) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
TBE tbe := TBEs[in_msg.Address];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Address, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address, tbe);
} else {
DPRINTF(RubySlicc,"%s\n", in_msg.Type);
error("Invalid message");
}
}
}
}
// Actions
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.OriginalRequestorMachId;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(c_clearOwner, "c", desc="Clear the owner field") {
getDirectoryEntry(address).Owner.clear();
}
action(d_sendData, "d", desc="Send data to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.DataBlk := in_msg.DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
peek(memQueue_in, MemoryMsg) {
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
assert(is_valid(tbe));
out_msg.PhysicalAddress := address;
out_msg.LineAddress := address;
out_msg.Type := DMAResponseType:DATA;
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
out_msg.Destination.add(tbe.DmaRequestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
peek(requestQueue_in, RequestMsg) {
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
assert(is_valid(tbe));
out_msg.PhysicalAddress := address;
out_msg.LineAddress := address;
out_msg.Type := DMAResponseType:DATA;
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
out_msg.Destination.add(tbe.DmaRequestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
assert(is_valid(tbe));
out_msg.PhysicalAddress := address;
out_msg.LineAddress := address;
out_msg.Type := DMAResponseType:ACK;
out_msg.Destination.add(tbe.DmaRequestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
peek(requestQueue_in, RequestMsg) {
getDirectoryEntry(address).Owner.clear();
getDirectoryEntry(address).Owner.add(in_msg.Requestor);
}
}
action(f_forwardRequest, "f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
APPEND_TRANSITION_COMMENT("Own: ");
APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Address).Owner);
APPEND_TRANSITION_COMMENT("Req: ");
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination := getDirectoryEntry(in_msg.Address).Owner;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
peek(dmaRequestQueue_in, DMARequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
requestQueue_in.dequeue();
}
action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
dmaRequestQueue_in.dequeue();
}
action(l_writeDataToMemory, "pl", desc="Write PUTX data to memory") {
peek(requestQueue_in, RequestMsg) {
// assert(in_msg.Dirty);
// assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
//getDirectoryEntry(in_msg.Address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
}
}
action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
assert(is_valid(tbe));
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
tbe.DmaRequestor := in_msg.Requestor;
}
}
action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
tbe.DmaRequestor := in_msg.Requestor;
}
}
action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
}
}
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
TBEs.deallocate(address);
unset_tbe();
}
action(z_recycleRequestQueue, "z", desc="recycle request queue") {
requestQueue_in.recycle();
}
action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
dmaRequestQueue_in.recycle();
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
//out_msg.OriginalRequestorMachId := machineID;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
//out_msg.OriginalRequestorMachId := machineID;
//out_msg.DataBlk := in_msg.DataBlk;
out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
peek(requestQueue_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
assert(is_valid(tbe));
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
// get incoming data
// out_msg.DataBlk := in_msg.DataBlk;
out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
peek(requestQueue_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
memQueue_in.dequeue();
}
action(w_writeDataToMemoryFromTBE, "\w", desc="Write data to directory memory from TBE") {
//getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
assert(is_valid(tbe));
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
addressOffset(tbe.PhysicalAddress),
tbe.Len);
}
// TRANSITIONS
transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
z_recycleRequestQueue;
}
transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner} ) {
z_recycleRequestQueue;
}
transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE} ) {
y_recycleDMARequestQueue;
}
transition(I, GETX, IM) {
//d_sendData;
qf_queueMemoryFetchRequest;
e_ownerIsRequestor;
i_popIncomingRequestQueue;
}
transition(IM, Memory_Data, M) {
d_sendData;
//e_ownerIsRequestor;
l_popMemQueue;
}
transition(I, DMA_READ, ID) {
//dr_sendDMAData;
r_allocateTbeForDmaRead;
qf_queueMemoryFetchRequestDMA;
p_popIncomingDMARequestQueue;
}
transition(ID, Memory_Data, I) {
dr_sendDMAData;
//p_popIncomingDMARequestQueue;
w_deallocateTBE;
l_popMemQueue;
}
transition(I, DMA_WRITE, ID_W) {
v_allocateTBE;
qw_queueMemoryWBRequest_partial;
p_popIncomingDMARequestQueue;
}
transition(ID_W, Memory_Ack, I) {
dwt_writeDMADataFromTBE;
da_sendDMAAck;
w_deallocateTBE;
l_popMemQueue;
}
transition(M, DMA_READ, M_DRD) {
v_allocateTBE;
inv_sendCacheInvalidate;
p_popIncomingDMARequestQueue;
}
transition(M_DRD, PUTX, M_DRDI) {
l_writeDataToMemory;
drp_sendDMAData;
c_clearOwner;
l_queueMemoryWBRequest;
i_popIncomingRequestQueue;
}
transition(M_DRDI, Memory_Ack, I) {
l_sendWriteBackAck;
w_deallocateTBE;
l_popMemQueue;
}
transition(M, DMA_WRITE, M_DWR) {
v_allocateTBE;
inv_sendCacheInvalidate;
p_popIncomingDMARequestQueue;
}
transition(M_DWR, PUTX, M_DWRI) {
l_writeDataToMemory;
qw_queueMemoryWBRequest_partialTBE;
c_clearOwner;
i_popIncomingRequestQueue;
}
transition(M_DWRI, Memory_Ack, I) {
w_writeDataToMemoryFromTBE;
l_sendWriteBackAck;
da_sendDMAAck;
w_deallocateTBE;
l_popMemQueue;
}
transition(M, GETX, M) {
f_forwardRequest;
e_ownerIsRequestor;
i_popIncomingRequestQueue;
}
transition(M, PUTX, MI) {
l_writeDataToMemory;
c_clearOwner;
v_allocateTBEFromRequestNet;
l_queueMemoryWBRequest;
i_popIncomingRequestQueue;
}
transition(MI, Memory_Ack, I) {
w_writeDataToMemoryFromTBE;
l_sendWriteBackAck;
w_deallocateTBE;
l_popMemQueue;
}
transition(M, PUTX_NotOwner, M) {
b_sendWriteBackNack;
i_popIncomingRequestQueue;
}
transition(I, PUTX_NotOwner, I) {
b_sendWriteBackNack;
i_popIncomingRequestQueue;
}
}
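Seen end-to-end, the directory's DMA handling above is two short round trips, plus an owner-invalidation detour when a cache holds the line. A Python sketch of those paths as a table (illustrative only, not gem5 code):

# Directory-side DMA transitions from the machine above (abbreviated).
DMA_TRANSITIONS = {
    ("I",    "DMA_READ"):    ("ID",    ["allocate TBE", "queue memory fetch"]),
    ("ID",   "Memory_Data"): ("I",     ["send data to DMA", "free TBE"]),
    ("I",    "DMA_WRITE"):   ("ID_W",  ["allocate TBE", "queue partial memory WB"]),
    ("ID_W", "Memory_Ack"):  ("I",     ["write TBE data", "ack DMA", "free TBE"]),
    # If a cache owns the line, the owner is invalidated first and the
    # directory waits in M_DRD/M_DWR for its PUTX before answering the DMA.
    ("M",    "DMA_READ"):    ("M_DRD", ["allocate TBE", "invalidate owner"]),
    ("M",    "DMA_WRITE"):   ("M_DWR", ["allocate TBE", "invalidate owner"]),
}

state = "I"
for event in ("DMA_WRITE", "Memory_Ack"):
    state, actions = DMA_TRANSITIONS[(state, event)]
    print(event, "->", state, actions)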


@ -0,0 +1,143 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
int request_latency = 6
{
MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response", no_vector="true";
MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request", no_vector="true";
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: processing a DMA read";
BUSY_WR, AccessPermission:Busy, desc="Busy: processing a DMA write";
}
enumeration(Event, desc="DMA events") {
ReadRequest, desc="A new read request";
WriteRequest, desc="A new write request";
Data, desc="Data from a DMA memory read";
Ack, desc="DMA write to memory completed";
}
MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
State cur_state, no_vector="true";
State getState(Address addr) {
return cur_state;
}
void setState(Address addr, State state) {
cur_state := state;
}
AccessPermission getAccessPermission(Address addr) {
return AccessPermission:NotPresent;
}
void setAccessPermission(Address addr, State state) {
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
error("DMA Controller does not support getDataBlock function.\n");
}
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
} else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress);
} else {
error("Invalid request type");
}
}
}
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
if (dmaResponseQueue_in.isReady()) {
peek( dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
} else if (in_msg.Type == DMAResponseType:DATA) {
trigger(Event:Data, in_msg.LineAddress);
} else {
error("Invalid response type");
}
}
}
}
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
out_msg.Requestor := machineID;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;
out_msg.Requestor := machineID;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
peek (dmaResponseQueue_in, DMAResponseMsg) {
dma_sequencer.ackCallback();
}
}
action(d_dataCallback, "d", desc="Write data to dma sequencer") {
peek (dmaResponseQueue_in, DMAResponseMsg) {
dma_sequencer.dataCallback(in_msg.DataBlk);
}
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
dmaRequestQueue_in.dequeue();
}
action(p_popResponseQueue, "\p", desc="Pop response queue") {
dmaResponseQueue_in.dequeue();
}
transition(READY, ReadRequest, BUSY_RD) {
s_sendReadRequest;
p_popRequestQueue;
}
transition(READY, WriteRequest, BUSY_WR) {
s_sendWriteRequest;
p_popRequestQueue;
}
transition(BUSY_RD, Data, READY) {
d_dataCallback;
p_popResponseQueue;
}
transition(BUSY_WR, Ack, READY) {
a_ackCallback;
p_popResponseQueue;
}
}
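On the requesting side, the controller above is strictly one request at a time: READY issues, BUSY_RD or BUSY_WR waits, and the callback returns it to READY. A minimal Python sketch of one read round trip (illustrative only, not gem5 code):

# Mirrors READY --ReadRequest--> BUSY_RD --Data--> READY from the
# machine above.
class DmaController:
    def __init__(self):
        self.state = "READY"

    def read_request(self, line_address):
        assert self.state == "READY", "only one outstanding request"
        self.state = "BUSY_RD"          # s_sendReadRequest, pop request queue
        return ("DMARequestMsg", "READ", line_address)

    def data_response(self, data):
        assert self.state == "BUSY_RD"
        self.state = "READY"            # d_dataCallback, pop response queue
        return data

dma = DmaController()
dma.read_request(0x2000)
dma.data_response(b"\x00" * 64)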


@ -0,0 +1,122 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOESI_SMP_directory-msg.sm 1.8 05/01/19 15:48:36-06:00 mikem@royal16.cs.wisc.edu $
*
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
GETS, desc="Get Shared";
PUTX, desc="Put eXclusive";
PUTO, desc="Put Owned";
WB_ACK, desc="Writeback ack";
WB_NACK, desc="Writeback neg. ack";
INV, desc="Invalidation";
FWD, desc="Generic FWD";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
ACK, desc="ACKnowledgment, responder doesn't have a copy";
DATA, desc="Data";
DATA_EXCLUSIVE_CLEAN, desc="Data, no other processor has a copy, data is clean";
DATA_EXCLUSIVE_DIRTY, desc="Data, no other processor has a copy, data is dirty";
UNBLOCK, desc="Unblock";
UNBLOCK_EXCLUSIVE, desc="Unblock, we're in E/M";
WRITEBACK_CLEAN, desc="Clean writeback (no data)";
WRITEBACK_DIRTY, desc="Dirty writeback (contains data)";
WRITEBACK, desc="Generic writeback (contains data)";
}
// TriggerType
enumeration(TriggerType, desc="...") {
ALL_ACKS, desc="See corresponding event";
}
// TriggerMsg
structure(TriggerMsg, desc="...", interface="Message") {
Address Address, desc="Physical address for this request";
TriggerType Type, desc="Type of trigger";
}
// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";
DataBlock DataBlk, desc="data for the cache line";
MessageSizeType MessageSize, desc="size category of the message";
}
// ResponseMsg (and also unblock requests)
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
NetDest Destination, desc="Node to whom the data is sent";
DataBlock DataBlk, desc="data for the cache line";
bool Dirty, desc="Is the data dirty (different than memory)?";
MessageSizeType MessageSize, desc="size category of the message";
}
enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
READ, desc="Memory Read";
WRITE, desc="Memory Write";
NULL, desc="Invalid";
}
enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
DATA, desc="DATA read";
ACK, desc="ACK write";
NULL, desc="Invalid";
}
structure(DMARequestMsg, desc="...", interface="NetworkMessage") {
DMARequestType Type, desc="Request type (read/write)";
Address PhysicalAddress, desc="Physical address for this request";
Address LineAddress, desc="Line address for this request";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
int Len, desc="The length of the request";
MessageSizeType MessageSize, desc="size category of the message";
}
structure(DMAResponseMsg, desc="...", interface="NetworkMessage") {
DMAResponseType Type, desc="Response type (DATA/ACK)";
Address PhysicalAddress, desc="Physical address for this request";
Address LineAddress, desc="Line address for this request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
MessageSizeType MessageSize, desc="size category of the message";
}
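For orientation, the DMA message structures above are plain records; one read exchange in Python (a sketch only; the field defaults are assumptions, not gem5 code):

from dataclasses import dataclass, field

@dataclass
class DMARequestMsg:
    # Mirrors the SLICC DMARequestMsg declared above.
    Type: str                      # "READ" or "WRITE"
    PhysicalAddress: int
    LineAddress: int
    Requestor: str
    Destination: set = field(default_factory=set)
    DataBlk: bytes = b""
    Len: int = 0
    MessageSize: str = "Writeback_Control"

@dataclass
class DMAResponseMsg:
    # Mirrors the SLICC DMAResponseMsg declared above.
    Type: str                      # "DATA" or "ACK"
    PhysicalAddress: int
    LineAddress: int
    Destination: set = field(default_factory=set)
    DataBlk: bytes = b""
    MessageSize: str = "Response_Data"

req = DMARequestMsg(Type="READ", PhysicalAddress=0x1008,
                    LineAddress=0x1000, Requestor="DMA-0", Len=8)
req.Destination.add("Directory-0")
resp = DMAResponseMsg(Type="DATA", PhysicalAddress=0x1008,
                      LineAddress=0x1000, DataBlk=b"\x00" * 64)
resp.Destination.add(req.Requestor)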


@ -0,0 +1,6 @@
protocol "MI_example";
include "RubySlicc_interfaces.slicc";
include "MI_example-msg.sm";
include "MI_example-cache.sm";
include "MI_example-dir.sm";
include "MI_example-dma.sm";

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,923 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
machine(Directory, "Directory protocol")
: DirectoryMemory * directory,
MemoryControl * memBuffer,
int directory_latency = 6
{
// ** IN QUEUES **
MessageBuffer foo1, network="From", virtual_network="0", ordered="false", vnet_type="foo"; // a mod-L2 bank -> this Dir
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false", vnet_type="request"; // a mod-L2 bank -> this Dir
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type="response"; // a mod-L2 bank -> this Dir
MessageBuffer goo1, network="To", virtual_network="0", ordered="false", vnet_type="goo";
MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false", vnet_type="forward";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false", vnet_type="response"; // Dir -> mod-L2 bank
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, AccessPermission:Read_Write, desc="Invalid";
S, AccessPermission:Read_Only, desc="Shared";
O, AccessPermission:Maybe_Stale, desc="Owner";
M, AccessPermission:Maybe_Stale, desc="Modified";
IS, AccessPermission:Busy, desc="Blocked, was in idle";
SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
OO, AccessPermission:Busy, desc="Blocked, was in owned";
MO, AccessPermission:Busy, desc="Blocked, going to owner or maybe modified";
MM, AccessPermission:Busy, desc="Blocked, going to modified";
MM_DMA, AccessPermission:Busy, desc="Blocked, going to I";
MI, AccessPermission:Busy, desc="Blocked on a writeback";
MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
OS, AccessPermission:Busy, desc="Blocked on a writeback";
OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";
OD, AccessPermission:Busy, desc="In O, waiting for dma ack from L2";
MD, AccessPermission:Busy, desc="In M, waiting for dma ack from L2";
}
// Events
enumeration(Event, desc="Directory events") {
GETX, desc="A GETX arrives";
GETS, desc="A GETS arrives";
PUTX, desc="A PUTX arrives";
PUTO, desc="A PUTO arrives";
PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
Unblock, desc="An unblock message arrives";
Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
Exclusive_Unblock, desc="The processor became the exclusive owner (E or M) of the line";
Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
Memory_Data, desc="Fetched data from memory arrives";
Memory_Ack, desc="Writeback Ack from memory arrives";
DMA_READ, desc="DMA Read";
DMA_WRITE, desc="DMA Write";
DMA_ACK, desc="DMA Ack";
Data, desc="Data to directory";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...", interface="AbstractEntry") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
NetDest Sharers, desc="Sharers for this block";
NetDest Owner, desc="Owner of this block";
int WaitingUnblocks, desc="Number of acks we're waiting for";
}
structure(TBE, desc="...") {
Address PhysicalAddress, desc="Physical address for this entry";
int Len, desc="Length of request";
DataBlock DataBlk, desc="DataBlk";
MachineID Requestor, desc="original requestor";
}
structure(TBETable, external="yes") {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
// ** OBJECTS **
TBETable TBEs, template_hack="<Directory_TBE>";
void set_tbe(TBE b);
void unset_tbe();
Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
}
dir_entry := static_cast(Entry, "pointer",
directory.allocate(addr, new Entry));
return dir_entry;
}
State getState(TBE tbe, Address addr) {
return getDirectoryEntry(addr).DirectoryState;
}
void setState(TBE tbe, Address addr, State state) {
if (directory.isPresent(addr)) {
if (state == State:I) {
assert(getDirectoryEntry(addr).Owner.count() == 0);
assert(getDirectoryEntry(addr).Sharers.count() == 0);
}
if (state == State:S) {
assert(getDirectoryEntry(addr).Owner.count() == 0);
}
if (state == State:O) {
assert(getDirectoryEntry(addr).Owner.count() == 1);
assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
}
if (state == State:M) {
assert(getDirectoryEntry(addr).Owner.count() == 1);
assert(getDirectoryEntry(addr).Sharers.count() == 0);
}
if ((state != State:SS) && (state != State:OO)) {
assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
}
if ( (getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I) ) {
getDirectoryEntry(addr).DirectoryState := state;
// disable coherence checker
// sequencer.checkCoherence(addr);
}
else {
getDirectoryEntry(addr).DirectoryState := state;
}
}
}
AccessPermission getAccessPermission(Address addr) {
if (directory.isPresent(addr)) {
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
}
DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
return AccessPermission:NotPresent;
}
void setAccessPermission(Address addr, State state) {
if (directory.isPresent(addr)) {
getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
}
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
return getDirectoryEntry(addr).DataBlk;
}
// if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
bool isBlockShared(Address addr) {
if (directory.isPresent(addr)) {
if (getDirectoryEntry(addr).DirectoryState == State:I) {
return true;
}
}
return false;
}
bool isBlockExclusive(Address addr) {
if (directory.isPresent(addr)) {
if (getDirectoryEntry(addr).DirectoryState == State:I) {
return true;
}
}
return false;
}
// ** OUT_PORTS **
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
out_port(goo1_out, ResponseMsg, goo1);
out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
in_port(foo1_in, ResponseMsg, foo1) {
}
// in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
// if (unblockNetwork_in.isReady()) {
in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
if (unblockNetwork_in.isReady()) {
peek(unblockNetwork_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (getDirectoryEntry(in_msg.Address).WaitingUnblocks == 1) {
trigger(Event:Last_Unblock, in_msg.Address,
TBEs[in_msg.Address]);
} else {
trigger(Event:Unblock, in_msg.Address,
TBEs[in_msg.Address]);
}
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.Address,
TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
trigger(Event:Dirty_Writeback, in_msg.Address,
TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.Address,
TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data, in_msg.Address,
TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_ACK, in_msg.Address,
TBEs[in_msg.Address]);
} else {
error("Invalid message");
}
}
}
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
trigger(Event:PUTX, in_msg.Address, TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
trigger(Event:PUTO, in_msg.Address, TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
trigger(Event:PUTO_SHARERS, in_msg.Address, TBEs[in_msg.Address]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
TBEs[makeLineAddress(in_msg.Address)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
TBEs[makeLineAddress(in_msg.Address)]);
} else {
error("Invalid message");
}
}
}
}
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, memBuffer) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
}
}
// Actions
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(c_clearOwner, "c", desc="Clear the owner field") {
getDirectoryEntry(address).Owner.clear();
}
action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
getDirectoryEntry(address).Owner.clear();
}
action(cc_clearSharers, "\c", desc="Clear the sharers field") {
getDirectoryEntry(address).Sharers.clear();
}
action(d_sendDataMsg, "d", desc="Send data to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
//out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := false; // By definition, the block is now clean
out_msg.Acks := in_msg.Acks;
if (in_msg.ReadX) {
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
} else {
out_msg.Type := CoherenceResponseType:DATA;
}
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(p_fwdDataToDMA, "\d", desc="Send data to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
out_msg.Dirty := false; // By definition, the block is now clean
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
peek(unblockNetwork_in, ResponseMsg) {
getDirectoryEntry(address).Owner.clear();
getDirectoryEntry(address).Owner.add(in_msg.Sender);
}
}
action(f_forwardRequest, "f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
out_msg.Acks := getDirectoryEntry(address).Sharers.count();
if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
out_msg.Acks := out_msg.Acks - 1;
}
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
}
}
}
action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
out_msg.Acks := getDirectoryEntry(address).Sharers.count();
if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
out_msg.Acks := out_msg.Acks - 1;
}
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
}
}
}
action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
peek(requestQueue_in, RequestMsg) {
if ((getDirectoryEntry(in_msg.Address).Sharers.count() > 1) ||
((getDirectoryEntry(in_msg.Address).Sharers.count() > 0) && (getDirectoryEntry(in_msg.Address).Sharers.isElement(in_msg.Requestor) == false))) {
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
// out_msg.Destination := getDirectoryEntry(in_msg.Address).Sharers;
out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Sharers);
out_msg.Destination.remove(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Invalidate_Control;
}
}
}
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
requestQueue_in.dequeue();
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
unblockNetwork_in.dequeue();
}
action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
in_msg.Address, in_msg.DataBlk);
}
}
action(p_writeFwdDataToMemory, "p", desc="Write Response data to memory") {
peek(unblockNetwork_in, ResponseMsg) {
getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
in_msg.Address, in_msg.DataBlk);
}
}
action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
// NOTE: The following check would not be valid in a real
// implementation. We include the data in the "dataless"
// message so we can assert the clean data matches the datablock
// in memory
assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
}
}
action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
peek(unblockNetwork_in, ResponseMsg) {
getDirectoryEntry(address).Sharers.add(in_msg.Sender);
}
}
action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
}
action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
}
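// Unblock bookkeeping: each request forwarded while the line is blocked
// runs n_incrementOutstanding, each arriving UNBLOCK runs
// o_decrementOutstanding, and the unblock in_port fires Last_Unblock
// instead of Unblock when WaitingUnblocks == 1, i.e. when this is the
// final unblock the directory still waits for before leaving SS/OO.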
action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
memQueue_in.dequeue();
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := false;
// These are not used by memory but are passed back here with the read data:
out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && getDirectoryEntry(address).Sharers.count() == 0);
out_msg.Acks := getDirectoryEntry(address).Sharers.count();
if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
out_msg.Acks := out_msg.Acks - 1;
}
DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
peek(unblockNetwork_in, ResponseMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.Sender := machineID;
if (is_valid(tbe)) {
out_msg.OriginalRequestorMachId := tbe.Requestor;
}
out_msg.DataBlk := in_msg.DataBlk;
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := false;
// Not used:
out_msg.ReadX := false;
out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
peek(requestQueue_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := false;
// Not used:
out_msg.ReadX := false;
out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
// action(z_stall, "z", desc="Cannot be handled right now.") {
// Special name recognized as do nothing case
// }
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
requestQueue_in.recycle();
}
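  // NOTE: Unlike a plain stall, recycle() re-enqueues the head message and
  // makes it eligible again only after a recycle latency, so later queued
  // requests (possibly to other addresses) are not blocked behind an
  // address that is waiting out a transient state.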
action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
peek(requestQueue_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
out_msg.Type := CoherenceResponseType:DMA_ACK;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
peek(unblockNetwork_in, ResponseMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
if (is_valid(tbe)) {
out_msg.Destination.add(tbe.Requestor);
}
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
out_msg.Type := CoherenceResponseType:DMA_ACK;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
peek(requestQueue_in, RequestMsg) {
getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
}
}
action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
assert(is_valid(tbe));
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
addressOffset(tbe.PhysicalAddress), tbe.Len);
}
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
tbe.PhysicalAddress := in_msg.Address;
tbe.Len := in_msg.Len;
tbe.DataBlk := in_msg.DataBlk;
tbe.Requestor := in_msg.Requestor;
}
}
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
TBEs.deallocate(address);
unset_tbe();
}
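  // NOTE: For DMA writes hitting an owned line ({O,M} -> OI_D below), the
  // TBE buffers the DMA payload and requestor so the partial write can be
  // re-applied on top of the owner's forwarded data (p_writeFwdDataToMemory,
  // then l_writeDMADataToMemoryFromTBE) before the line is invalidated.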
// TRANSITIONS
transition(I, GETX, MM) {
qf_queueMemoryFetchRequest;
i_popIncomingRequestQueue;
}
transition(I, DMA_READ, XI_M) {
qf_queueMemoryFetchRequest;
i_popIncomingRequestQueue;
}
transition(I, DMA_WRITE, XI_U) {
qw_queueMemoryWBRequest2;
a_sendDMAAck; // ack count may be zero
l_writeDMADataToMemory;
i_popIncomingRequestQueue;
}
transition(XI_M, Memory_Data, I) {
d_sendDataMsg; // ack count may be zero
q_popMemQueue;
}
transition(XI_U, Exclusive_Unblock, I) {
cc_clearSharers;
c_clearOwner;
j_popIncomingUnblockQueue;
}
transition(S, GETX, MM) {
qf_queueMemoryFetchRequest;
g_sendInvalidations;
i_popIncomingRequestQueue;
}
transition(S, DMA_READ) {
//qf_queueMemoryFetchRequest;
p_fwdDataToDMA;
//g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
i_popIncomingRequestQueue;
}
transition(S, DMA_WRITE, XI_U) {
qw_queueMemoryWBRequest2;
a_sendDMAAck; // ack count may be zero
l_writeDMADataToMemory;
g_sendInvalidations; // the DMA will collect invalidations
i_popIncomingRequestQueue;
}
transition(I, GETS, IS) {
qf_queueMemoryFetchRequest;
i_popIncomingRequestQueue;
}
transition({S, SS}, GETS, SS) {
qf_queueMemoryFetchRequest;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
transition({I, S}, PUTO) {
b_sendWriteBackNack;
i_popIncomingRequestQueue;
}
transition({I, S, O}, PUTX) {
b_sendWriteBackNack;
i_popIncomingRequestQueue;
}
transition(O, GETX, MM) {
f_forwardRequest;
g_sendInvalidations;
i_popIncomingRequestQueue;
}
transition(O, DMA_READ, OD) {
f_forwardRequest; // this will cause the data to go to DMA directly
//g_sendInvalidations; // this will cause acks to be sent to the DMA
i_popIncomingRequestQueue;
}
transition(OD, DMA_ACK, O) {
j_popIncomingUnblockQueue;
}
transition({O,M}, DMA_WRITE, OI_D) {
f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
g_sendInvalidations; // these go to the DMA Controller
v_allocateTBE;
i_popIncomingRequestQueue;
}
transition(OI_D, Data, XI_U) {
qw_queueMemoryWBRequest;
a_sendDMAAck2; // ack count may be zero
p_writeFwdDataToMemory;
l_writeDMADataToMemoryFromTBE;
w_deallocateTBE;
j_popIncomingUnblockQueue;
}
transition({O, OO}, GETS, OO) {
f_forwardRequest;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
transition(M, GETX, MM) {
f_forwardRequest;
i_popIncomingRequestQueue;
}
  // no exclusive unblock will arrive at the directory
transition(M, DMA_READ, MD) {
f_forwardRequest; // this will cause the data to go to DMA directly
i_popIncomingRequestQueue;
}
transition(MD, DMA_ACK, M) {
j_popIncomingUnblockQueue;
}
transition(M, GETS, MO) {
f_forwardRequest;
i_popIncomingRequestQueue;
}
transition(M, PUTX, MI) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
  // occurs if the M->O transition happened on-chip
transition(M, PUTO, MI) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition(M, PUTO_SHARERS, MIS) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition(O, PUTO, OS) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition(O, PUTO_SHARERS, OSS) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
zz_recycleRequest;
}
transition({MM, MO}, Exclusive_Unblock, M) {
cc_clearSharers;
e_ownerIsUnblocker;
j_popIncomingUnblockQueue;
}
transition(MO, Unblock, O) {
m_addUnlockerToSharers;
j_popIncomingUnblockQueue;
}
transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
zz_recycleRequest;
}
transition(IS, GETS) {
zz_recycleRequest;
}
transition(IS, Unblock, S) {
m_addUnlockerToSharers;
j_popIncomingUnblockQueue;
}
transition(IS, Exclusive_Unblock, M) {
cc_clearSharers;
e_ownerIsUnblocker;
j_popIncomingUnblockQueue;
}
transition(SS, Unblock) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(SS, Last_Unblock, S) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(OO, Unblock) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(OO, Last_Unblock, O) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(MI, Dirty_Writeback, I) {
c_clearOwner;
cc_clearSharers;
l_writeDataToMemory;
qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(MIS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(MIS, Clean_Writeback, S) {
c_moveOwnerToSharer;
j_popIncomingUnblockQueue;
}
transition(OS, Dirty_Writeback, S) {
c_clearOwner;
l_writeDataToMemory;
qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(OSS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(OSS, Clean_Writeback, S) {
c_moveOwnerToSharer;
j_popIncomingUnblockQueue;
}
transition(MI, Clean_Writeback, I) {
c_clearOwner;
cc_clearSharers;
ll_checkDataInMemory;
j_popIncomingUnblockQueue;
}
transition(OS, Clean_Writeback, S) {
c_clearOwner;
ll_checkDataInMemory;
j_popIncomingUnblockQueue;
}
transition({MI, MIS}, Unblock, M) {
j_popIncomingUnblockQueue;
}
transition({OS, OSS}, Unblock, O) {
j_popIncomingUnblockQueue;
}
transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
d_sendDataMsg;
q_popMemQueue;
}
transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
//a_sendAck;
q_popMemQueue;
}
}


@ -0,0 +1,299 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
int request_latency = 14,
int response_latency = 14
{
MessageBuffer goo1, network="From", virtual_network="0", ordered="false", vnet_type="goo";
MessageBuffer goo2, network="From", virtual_network="1", ordered="false", vnet_type="goo";
MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false", vnet_type="response";
MessageBuffer foo1, network="To", virtual_network="0", ordered="false", vnet_type="foo";
MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
MessageBuffer respToDir, network="To", virtual_network="2", ordered="false", vnet_type="dmaresponse";
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
}
enumeration(Event, desc="DMA events") {
ReadRequest, desc="A new read request";
WriteRequest, desc="A new write request";
Data, desc="Data from a DMA memory read";
DMA_Ack, desc="DMA write to memory completed";
Inv_Ack, desc="Invalidation Ack from a sharer";
All_Acks, desc="All acks received";
}
structure(TBE, desc="...") {
Address address, desc="Physical address";
int NumAcks, default="0", desc="Number of Acks pending";
DataBlock DataBlk, desc="Data";
}
structure(DMASequencer, external = "yes") {
void ackCallback();
void dataCallback(DataBlock);
}
structure(TBETable, external = "yes") {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
MessageBuffer mandatoryQueue, ordered="false";
MessageBuffer triggerQueue, ordered="true";
TBETable TBEs, template_hack="<DMA_TBE>";
State cur_state;
void set_tbe(TBE b);
void unset_tbe();
State getState(TBE tbe, Address addr) {
return cur_state;
}
void setState(TBE tbe, Address addr, State state) {
cur_state := state;
}
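  // NOTE: This controller handles one transaction at a time, so a single
  // cur_state variable stands in for per-address state; getState and
  // setState ignore the address entirely.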
AccessPermission getAccessPermission(Address addr) {
return AccessPermission:NotPresent;
}
void setAccessPermission(Address addr, State state) {
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
error("DMA Controller does not support getDataBlock().\n");
}
out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
out_port(foo1_out, ResponseMsg, foo1, desc="...");
out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
in_port(goo1_in, RequestMsg, goo1) {
if (goo1_in.isReady()) {
peek(goo1_in, RequestMsg) {
assert(false);
}
}
}
in_port(goo2_in, RequestMsg, goo2) {
if (goo2_in.isReady()) {
peek(goo2_in, RequestMsg) {
assert(false);
}
}
}
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress,
TBEs[in_msg.LineAddress]);
} else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress,
TBEs[in_msg.LineAddress]);
} else {
error("Invalid request type");
}
}
}
}
in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
if (dmaResponseQueue_in.isReady()) {
      peek(dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_Ack, makeLineAddress(in_msg.Address),
TBEs[makeLineAddress(in_msg.Address)]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, makeLineAddress(in_msg.Address),
TBEs[makeLineAddress(in_msg.Address)]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Inv_Ack, makeLineAddress(in_msg.Address),
TBEs[makeLineAddress(in_msg.Address)]);
} else {
error("Invalid response type");
}
}
}
}
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.Address, TBEs[in_msg.Address]);
} else {
error("Unexpected message");
}
}
}
}
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
out_msg.Address := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_READ;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:DMA;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
out_msg.Address := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_WRITE;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:DMA;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
  action(a_ackCallback, "a", desc="Notify DMA controller that write request completed") {
dma_sequencer.ackCallback();
}
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
assert(is_valid(tbe));
if (tbe.NumAcks == 0) {
enqueue(triggerQueue_out, TriggerMsg) {
out_msg.Address := address;
out_msg.Type := TriggerType:ALL_ACKS;
}
}
}
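  // NOTE: tbe.NumAcks is kept as a running balance; u_updateAckCount folds
  // each response's Acks field into it, and this action raises the
  // ALL_ACKS trigger once the balance returns to zero, i.e. once every
  // expected invalidation ack has been accounted for.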
action(u_updateAckCount, "u", desc="Update ack count") {
peek(dmaResponseQueue_in, ResponseMsg) {
assert(is_valid(tbe));
tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
}
}
  action(u_sendExclusiveUnblockToDir, "\u", desc="Send exclusive unblock to directory") {
enqueue(respToDirectory_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:DMA;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
dmaRequestQueue_in.dequeue();
}
  action(p_popResponseQueue, "\p", desc="Pop response queue") {
dmaResponseQueue_in.dequeue();
}
action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
triggerQueue_in.dequeue();
}
action(t_updateTBEData, "t", desc="Update TBE Data") {
peek(dmaResponseQueue_in, ResponseMsg) {
assert(is_valid(tbe));
tbe.DataBlk := in_msg.DataBlk;
}
}
action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
assert(is_valid(tbe));
dma_sequencer.dataCallback(tbe.DataBlk);
}
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
TBEs.allocate(address);
set_tbe(TBEs[address]);
}
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
TBEs.deallocate(address);
unset_tbe();
}
  action(z_stall, "z", desc="DMA is busy; stall") {
// do nothing
}
transition(READY, ReadRequest, BUSY_RD) {
s_sendReadRequest;
v_allocateTBE;
p_popRequestQueue;
}
transition(BUSY_RD, Inv_Ack) {
u_updateAckCount;
o_checkForCompletion;
p_popResponseQueue;
}
transition(BUSY_RD, Data, READY) {
t_updateTBEData;
d_dataCallbackFromTBE;
w_deallocateTBE;
//u_updateAckCount;
//o_checkForCompletion;
p_popResponseQueue;
}
transition(BUSY_RD, All_Acks, READY) {
d_dataCallbackFromTBE;
//u_sendExclusiveUnblockToDir;
w_deallocateTBE;
p_popTriggerQueue;
}
transition(READY, WriteRequest, BUSY_WR) {
s_sendWriteRequest;
v_allocateTBE;
p_popRequestQueue;
}
transition(BUSY_WR, Inv_Ack) {
u_updateAckCount;
o_checkForCompletion;
p_popResponseQueue;
}
transition(BUSY_WR, DMA_Ack) {
u_updateAckCount; // actually increases
o_checkForCompletion;
p_popResponseQueue;
}
transition(BUSY_WR, All_Acks, READY) {
a_ackCallback;
u_sendExclusiveUnblockToDir;
w_deallocateTBE;
p_popTriggerQueue;
}
}


@ -0,0 +1,104 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
GETS, desc="Get Shared";
PUTX, desc="Put eXclusive";
PUTO, desc="Put Owned";
PUTO_SHARERS, desc="Put Owned, but sharers exist so don't remove from sharers list";
PUTS, desc="Put Shared";
WB_ACK, desc="Writeback ack";
WB_ACK_DATA, desc="Writeback ack";
WB_NACK, desc="Writeback neg. ack";
INV, desc="Invalidation";
DMA_READ, desc="DMA Read";
DMA_WRITE, desc="DMA Write";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
ACK, desc="ACKnowledgment, responder doesn't have a copy";
DATA, desc="Data";
DATA_EXCLUSIVE, desc="Data, no processor has a copy";
UNBLOCK, desc="Unblock";
UNBLOCK_EXCLUSIVE, desc="Unblock, we're in E/M";
WRITEBACK_CLEAN_DATA, desc="Clean writeback (contains data)";
WRITEBACK_CLEAN_ACK, desc="Clean writeback (contains no data)";
WRITEBACK_DIRTY_DATA, desc="Dirty writeback (contains data)";
DMA_ACK, desc="Ack that a DMA write completed";
}
// TriggerType
enumeration(TriggerType, desc="...") {
ALL_ACKS, desc="See corresponding event";
}
// TriggerMsg
structure(TriggerMsg, desc="...", interface="Message") {
Address Address, desc="Physical address for this request";
TriggerType Type, desc="Type of trigger";
}
// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
int Len, desc="Length of Request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
MachineType RequestorMachine, desc="type of component";
NetDest Destination, desc="Multicast destination mask";
DataBlock DataBlk, desc="data for the cache line (DMA WRITE request)";
int Acks, desc="How many acks to expect";
MessageSizeType MessageSize, desc="size category of the message";
RubyAccessMode AccessMode, desc="user/supervisor access type";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
// ResponseMsg (and also unblock requests)
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
MachineType SenderMachine, desc="type of component sending msg";
NetDest Destination, desc="Node to whom the data is sent";
DataBlock DataBlk, desc="data for the cache line";
bool Dirty, desc="Is the data dirty (different than memory)?";
int Acks, desc="How many acks to expect";
MessageSizeType MessageSize, desc="size category of the message";
}
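// NOTE: The Dirty bit and the Writeback_Data/Writeback_Control size
// categories are cross-checked by the directory (l_writeDataToMemory vs.
// ll_checkDataInMemory), so a clean writeback stays control-sized even
// though it still carries a DataBlk purely for that consistency check.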


@ -0,0 +1,7 @@
protocol "MOESI_CMP_directory";
include "RubySlicc_interfaces.slicc";
include "MOESI_CMP_directory-msg.sm";
include "MOESI_CMP_directory-L2cache.sm";
include "MOESI_CMP_directory-L1cache.sm";
include "MOESI_CMP_directory-dma.sm";
include "MOESI_CMP_directory-dir.sm";

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,176 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
int request_latency = 6
{
MessageBuffer responseFromDir, network="From", virtual_network="5", ordered="true", vnet_type="response", no_vector="true";
MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request", no_vector="true";
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
}
enumeration(Event, desc="DMA events") {
ReadRequest, desc="A new read request";
WriteRequest, desc="A new write request";
Data, desc="Data from a DMA memory read";
Ack, desc="DMA write to memory completed";
}
structure(DMASequencer, external="yes") {
void ackCallback();
void dataCallback(DataBlock);
}
MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
State cur_state, no_vector="true";
State getState(Address addr) {
return cur_state;
}
void setState(Address addr, State state) {
cur_state := state;
}
AccessPermission getAccessPermission(Address addr) {
return AccessPermission:NotPresent;
}
void setAccessPermission(Address addr, State state) {
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
error("DMA Controller does not support getDataBlock function.\n");
}
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
} else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress);
} else {
error("Invalid request type");
}
}
}
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
if (dmaResponseQueue_in.isReady()) {
      peek(dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
} else if (in_msg.Type == DMAResponseType:DATA) {
trigger(Event:Data, in_msg.LineAddress);
} else {
error("Invalid response type");
}
}
}
}
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
out_msg.Requestor := machineID;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;
out_msg.Requestor := machineID;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
  action(a_ackCallback, "a", desc="Notify DMA controller that write request completed") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
dma_sequencer.ackCallback();
}
}
  action(d_dataCallback, "d", desc="Write data to DMA sequencer") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
dma_sequencer.dataCallback(in_msg.DataBlk);
}
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
dmaRequestQueue_in.dequeue();
}
  action(p_popResponseQueue, "\p", desc="Pop response queue") {
dmaResponseQueue_in.dequeue();
}
transition(READY, ReadRequest, BUSY_RD) {
s_sendReadRequest;
p_popRequestQueue;
}
transition(READY, WriteRequest, BUSY_WR) {
s_sendWriteRequest;
p_popRequestQueue;
}
transition(BUSY_RD, Data, READY) {
d_dataCallback;
p_popResponseQueue;
}
transition(BUSY_WR, Ack, READY) {
a_ackCallback;
p_popResponseQueue;
}
}


@ -0,0 +1,140 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
GETS, desc="Get Shared";
}
// PersistentType
enumeration(PersistentRequestType, desc="...") {
GETX_PERSISTENT, desc="...";
GETS_PERSISTENT, desc="...";
  DEACTIVATE_PERSISTENT, desc="...";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
DATA_OWNER, desc="Data";
ACK_OWNER, desc="data-less owner token";
DATA_SHARED, desc="Data";
ACK, desc="ACKnowledgment";
WB_TOKENS, desc="L1 to L2 writeback";
WB_SHARED_DATA, desc="L1 to L2 writeback with data";
WB_OWNED, desc="L1 to L2 writeback with data";
INV, desc="L1 informing L2 of loss of all tokens";
}
// TriggerType
enumeration(TriggerType, desc="...") {
REQUEST_TIMEOUT, desc="See corresponding event";
USE_TIMEOUT, desc="See corresponding event";
DATA, desc="data for dma read response";
DATA_ALL_TOKENS, desc="data and all tokens for dma write response";
}
// TriggerMsg
structure(TriggerMsg, desc="...", interface="Message") {
Address Address, desc="Physical address for this request";
TriggerType Type, desc="Type of trigger";
}
// PersistentMsg
structure(PersistentMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
PersistentRequestType Type, desc="Type of starvation request";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Destination set";
MessageSizeType MessageSize, desc="size category of the message";
RubyAccessMode AccessMode, desc="user/supervisor access type";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
// RequestMsg
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";
bool isLocal, desc="Is this request from a local L1";
int RetryNum, desc="retry sequence number";
MessageSizeType MessageSize, desc="size category of the message";
RubyAccessMode AccessMode, desc="user/supervisor access type";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
// ResponseMsg
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
NetDest Destination, desc="Node to whom the data is sent";
  int Tokens, desc="Number of tokens being transferred for this line";
DataBlock DataBlk, desc="data for the cache line";
bool Dirty, desc="Is the data dirty (different than memory)?";
MessageSizeType MessageSize, desc="size category of the message";
}
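// NOTE: Token coherence conserves a fixed number of tokens per block: a
// reader must hold at least one token, a writer must collect all of them,
// and the owner token (DATA_OWNER/ACK_OWNER above) marks the copy that is
// responsible for eventually writing the data back.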
enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
READ, desc="Memory Read";
WRITE, desc="Memory Write";
NULL, desc="Invalid";
}
enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
DATA, desc="DATA read";
ACK, desc="ACK write";
NULL, desc="Invalid";
}
structure(DMARequestMsg, desc="...", interface="NetworkMessage") {
DMARequestType Type, desc="Request type (read/write)";
Address PhysicalAddress, desc="Physical address for this request";
Address LineAddress, desc="Line address for this request";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
int Len, desc="The length of the request";
MessageSizeType MessageSize, desc="size category of the message";
}
structure(DMAResponseMsg, desc="...", interface="NetworkMessage") {
DMAResponseType Type, desc="Response type (DATA/ACK)";
Address PhysicalAddress, desc="Physical address for this request";
Address LineAddress, desc="Line address for this request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
MessageSizeType MessageSize, desc="size category of the message";
}


@ -0,0 +1,7 @@
protocol "MOESI_CMP_token";
include "RubySlicc_interfaces.slicc";
include "MOESI_CMP_token-msg.sm";
include "MOESI_CMP_token-L1cache.sm";
include "MOESI_CMP_token-L2cache.sm";
include "MOESI_CMP_token-dir.sm";
include "MOESI_CMP_token-dma.sm";

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,173 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
int request_latency = 6
{
MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response", no_vector="true";
MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request", no_vector="true";
state_declaration(State,
desc="DMA states",
default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
}
enumeration(Event, desc="DMA events") {
ReadRequest, desc="A new read request";
WriteRequest, desc="A new write request";
Data, desc="Data from a DMA memory read";
Ack, desc="DMA write to memory completed";
}
MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
State cur_state, no_vector="true";
State getState(Address addr) {
return cur_state;
}
void setState(Address addr, State state) {
cur_state := state;
}
AccessPermission getAccessPermission(Address addr) {
return AccessPermission:NotPresent;
}
void setAccessPermission(Address addr, State state) {
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
error("DMA Controller does not support getDataBlock function.\n");
}
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
} else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress);
} else {
error("Invalid request type");
}
}
}
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
if (dmaResponseQueue_in.isReady()) {
      peek(dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
} else if (in_msg.Type == DMAResponseType:DATA) {
trigger(Event:Data, in_msg.LineAddress);
} else {
error("Invalid response type");
}
}
}
}
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
out_msg.Requestor := machineID;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;
out_msg.Requestor := machineID;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Len := in_msg.Len;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
  action(a_ackCallback, "a", desc="Notify DMA controller that write request completed") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
dma_sequencer.ackCallback();
}
}
  action(d_dataCallback, "d", desc="Write data to DMA sequencer") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
dma_sequencer.dataCallback(in_msg.DataBlk);
}
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
dmaRequestQueue_in.dequeue();
}
  action(p_popResponseQueue, "\p", desc="Pop response queue") {
dmaResponseQueue_in.dequeue();
}
transition(READY, ReadRequest, BUSY_RD) {
s_sendReadRequest;
p_popRequestQueue;
}
transition(READY, WriteRequest, BUSY_WR) {
s_sendWriteRequest;
p_popRequestQueue;
}
transition(BUSY_RD, Data, READY) {
d_dataCallback;
p_popResponseQueue;
}
transition(BUSY_WR, Ack, READY) {
a_ackCallback;
p_popResponseQueue;
}
}


@ -0,0 +1,138 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* AMD's contributions to the MOESI hammer protocol do not constitute an
* endorsement of its similarity to any AMD products.
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
GETS, desc="Get Shared";
MERGED_GETS, desc="Get Shared";
PUT, desc="Put Ownership";
WB_ACK, desc="Writeback ack";
WB_NACK, desc="Writeback neg. ack";
PUTF, desc="PUT on a Flush";
GETF, desc="Issue exclusive for Flushing";
BLOCK_ACK, desc="Dir Block ack";
INV, desc="Invalidate";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
ACK, desc="ACKnowledgment, responder does not have a copy";
ACK_SHARED, desc="ACKnowledgment, responder has a shared copy";
DATA, desc="Data, responder does not have a copy";
DATA_SHARED, desc="Data, responder has a shared copy";
DATA_EXCLUSIVE, desc="Data, responder was exclusive, gave us a copy, and they went to invalid";
WB_CLEAN, desc="Clean writeback";
WB_DIRTY, desc="Dirty writeback";
WB_EXCLUSIVE_CLEAN, desc="Clean writeback of exclusive data";
WB_EXCLUSIVE_DIRTY, desc="Dirty writeback of exclusive data";
UNBLOCK, desc="Unblock for writeback";
UNBLOCKS, desc="Unblock now in S";
UNBLOCKM, desc="Unblock now in M/O/E";
NULL, desc="Null value";
}
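// NOTE: The unblock flavours tell the directory where the requestor
// settled: UNBLOCKS for S, UNBLOCKM for M/O/E, plain UNBLOCK for
// writebacks; UnblockS responses also name the block's current owner via
// the CurOwner field of ResponseMsg below.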
// TriggerType
enumeration(TriggerType, desc="...") {
L2_to_L1, desc="L2 to L1 transfer";
ALL_ACKS, desc="See corresponding event";
  ALL_ACKS_OWNER_EXISTS, desc="See corresponding event";
ALL_ACKS_NO_SHARERS, desc="See corresponding event";
  ALL_UNBLOCKS, desc="all UnblockS responses received";
}
// TriggerMsg
structure(TriggerMsg, desc="...", interface="Message") {
Address Address, desc="Physical address for this request";
TriggerType Type, desc="Type of trigger";
}
// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest MergedRequestors, desc="Merge set of read requestors";
NetDest Destination, desc="Multicast destination mask";
MessageSizeType MessageSize, desc="size category of the message";
bool DirectedProbe, default="false", desc="probe filter directed probe";
Time InitialRequestTime, default="0", desc="time the initial requests was sent from the L1Cache";
Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
int SilentAcks, default="0", desc="silent acks from the full-bit directory";
}
// ResponseMsg (and also unblock requests)
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
NetDest Destination, desc="Node to whom the data is sent";
DataBlock DataBlk, desc="data for the cache line";
bool Dirty, desc="Is the data dirty (different than memory)?";
int Acks, default="0", desc="How many messages this counts as";
MessageSizeType MessageSize, desc="size category of the message";
Time InitialRequestTime, default="0", desc="time the initial requests was sent from the L1Cache";
Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
int SilentAcks, default="0", desc="silent acks from the full-bit directory";
}
enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
READ, desc="Memory Read";
WRITE, desc="Memory Write";
NULL, desc="Invalid";
}
enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
DATA, desc="DATA read";
ACK, desc="ACK write";
NULL, desc="Invalid";
}
structure(DMARequestMsg, desc="...", interface="NetworkMessage") {
DMARequestType Type, desc="Request type (read/write)";
Address PhysicalAddress, desc="Physical address for this request";
Address LineAddress, desc="Line address for this request";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
int Len, desc="The length of the request";
MessageSizeType MessageSize, desc="size category of the message";
}
structure(DMAResponseMsg, desc="...", interface="NetworkMessage") {
DMAResponseType Type, desc="Response type (DATA/ACK)";
Address PhysicalAddress, desc="Physical address for this request";
Address LineAddress, desc="Line address for this request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
MessageSizeType MessageSize, desc="size category of the message";
}


@ -0,0 +1,6 @@
protocol "MOESI_hammer";
include "RubySlicc_interfaces.slicc";
include "MOESI_hammer-msg.sm";
include "MOESI_hammer-cache.sm";
include "MOESI_hammer-dir.sm";
include "MOESI_hammer-dma.sm";


@ -0,0 +1,228 @@
/*
* Copyright (c) 2009 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Authors: Brad Beckmann
* Tushar Krishna
*/
machine(L1Cache, "Network_test L1 Cache")
: Sequencer * sequencer,
int issue_latency = 2
{
// NETWORK BUFFERS
MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false", vnet_type = "request";
MessageBuffer forwardFromCache, network="To", virtual_network="1", ordered="false", vnet_type = "forward";
MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false", vnet_type = "response";
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
I, AccessPermission:Invalid, desc="Not Present/Invalid";
}
// EVENTS
enumeration(Event, desc="Cache events") {
// From processor
Request, desc="Request from Network_test";
Forward, desc="Forward from Network_test";
Response, desc="Response from Network_test";
}
// STRUCTURE DEFINITIONS
MessageBuffer mandatoryQueue, ordered="false";
DataBlock dummyData;
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") {
State CacheState, desc="cache state";
DataBlock DataBlk, desc="Data in the block";
}
// TBE fields
structure(TBE, desc="...") {
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
}
structure(TBETable, external="yes") {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
// STRUCTURES
TBETable TBEs, template_hack="<L1Cache_TBE>";
// FUNCTIONS
// cpu/testers/networktest/networktest.cc generates packets of the type
// ReadReq, INST_FETCH, and WriteReq.
// These are converted to LD, IFETCH and ST by mem/ruby/system/RubyPort.cc.
// These are then sent to the sequencer, which sends them here.
// Network_test-cache.sm tags LD, IFETCH and ST as Request, Forward,
// and Response Events respectively, which are then injected into
// virtual networks 0, 1 and 2 respectively.
// This models traffic of different types within the network.
//
// Note that requests and forwards are MessageSizeType:Control,
// while responses are MessageSizeType:Data.
//
Event mandatory_request_type_to_event(RubyRequestType type) {
if (type == RubyRequestType:LD) {
return Event:Request;
} else if (type == RubyRequestType:IFETCH) {
return Event:Forward;
} else if (type == RubyRequestType:ST) {
return Event:Response;
} else {
error("Invalid RubyRequestType");
}
}
State getState(TBE tbe, Entry cache_entry, Address addr) {
return State:I;
}
void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
}
AccessPermission getAccessPermission(Address addr) {
return AccessPermission:NotPresent;
}
void setAccessPermission(Entry cache_entry, Address addr, State state) {
}
Entry getCacheEntry(Address address), return_by_pointer="yes" {
return OOD;
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
error("Network Test does not support get data block.");
}
// NETWORK PORTS
out_port(requestNetwork_out, RequestMsg, requestFromCache);
out_port(forwardNetwork_out, RequestMsg, forwardFromCache);
out_port(responseNetwork_out, RequestMsg, responseFromCache);
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, RubyRequest) {
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
getCacheEntry(in_msg.LineAddress),
TBEs[in_msg.LineAddress]);
}
}
}
// ACTIONS
  // The destination directory of each packet is embedded in its address;
  // map_Address_to_Directory is used to retrieve it.
action(a_issueRequest, "a", desc="Issue a request") {
enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:MSG;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
//out_msg.Destination := broadcast(MachineType:Directory);
out_msg.MessageSize := MessageSizeType:Control;
}
}
action(b_issueForward, "b", desc="Issue a forward") {
enqueue(forwardNetwork_out, RequestMsg, latency=issue_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:MSG;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Control;
}
}
action(c_issueResponse, "c", desc="Issue a response") {
enqueue(responseNetwork_out, RequestMsg, latency=issue_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:MSG;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Data;
}
}
action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
mandatoryQueue_in.dequeue();
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
sequencer.readCallback(address, dummyData);
}
action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
sequencer.writeCallback(address, dummyData);
}
// TRANSITIONS
  // The sequencer hit callback is performed after the packet is injected.
// The goal of the Network_test protocol is only to inject packets into
// the network, not to keep track of them via TBEs.
transition(I, Response) {
s_store_hit;
c_issueResponse;
m_popMandatoryQueue;
}
transition(I, Request) {
r_load_hit;
a_issueRequest;
m_popMandatoryQueue;
}
transition(I, Forward) {
r_load_hit;
b_issueForward;
m_popMandatoryQueue;
}
}


@ -0,0 +1,147 @@
/*
* Copyright (c) 2009 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Authors: Brad Beckmann
* Tushar Krishna
*/
machine(Directory, "Network_test Directory")
:
{
MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type = "request";
MessageBuffer forwardToDir, network="From", virtual_network="1", ordered="false", vnet_type = "forward";
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type = "response";
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, AccessPermission:Invalid, desc="Invalid";
}
// Events
enumeration(Event, desc="Directory events") {
// processor requests
Receive_Request, desc="Receive Message";
Receive_Forward, desc="Receive Message";
Receive_Response, desc="Receive Message";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...", interface="AbstractEntry") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
}
// ** OBJECTS **
State getState(Address addr) {
return State:I;
}
void setState(Address addr, State state) {
}
AccessPermission getAccessPermission(Address addr) {
return AccessPermission:NotPresent;
}
void setAccessPermission(Address addr, State state) {
}
DataBlock getDataBlock(Address addr), return_by_ref="yes" {
error("Network Test does not support get data block.");
}
// ** IN_PORTS **
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Request, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
in_port(forwardQueue_in, RequestMsg, forwardToDir) {
if (forwardQueue_in.isReady()) {
peek(forwardQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Forward, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
in_port(responseQueue_in, RequestMsg, responseToDir) {
if (responseQueue_in.isReady()) {
peek(responseQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Response, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
// Actions
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
requestQueue_in.dequeue();
}
action(f_popIncomingForwardQueue, "f", desc="Pop incoming forward queue") {
forwardQueue_in.dequeue();
}
action(r_popIncomingResponseQueue, "r", desc="Pop incoming response queue") {
responseQueue_in.dequeue();
}
// TRANSITIONS
// The directory simply drops the received packets.
// The goal of Network_test is only to track network stats.
transition(I, Receive_Request) {
i_popIncomingRequestQueue;
}
transition(I, Receive_Forward) {
f_popIncomingForwardQueue;
}
transition(I, Receive_Response) {
r_popIncomingResponseQueue;
}
}


@ -0,0 +1,77 @@
/*
* Copyright (c) 2009 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
MSG, desc="Message";
}
// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";
DataBlock DataBlk, desc="data for the cache line";
MessageSizeType MessageSize, desc="size category of the message";
}
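// A usage sketch (hedged: the out_port name, the latency value, and the
// "address"/"machineID" variables below are illustrative assumptions, not
// taken from this file). A controller action would fill and send this
// message roughly as:
//
//   enqueue(requestNetwork_out, RequestMsg, latency="1") {
//     out_msg.Address := address;
//     out_msg.Type := CoherenceRequestType:MSG;
//     out_msg.Requestor := machineID;
//     out_msg.Destination.add(map_Address_to_Directory(address));
//     out_msg.MessageSize := MessageSizeType:Control;
//   }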
enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
READ, desc="Memory Read";
WRITE, desc="Memory Write";
NULL, desc="Invalid";
}
enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
DATA, desc="DATA read";
ACK, desc="ACK write";
NULL, desc="Invalid";
}
structure(DMARequestMsg, desc="...", interface="NetworkMessage") {
DMARequestType Type, desc="Request type (read/write)";
Address PhysicalAddress, desc="Physical address for this request";
Address LineAddress, desc="Line address for this request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
int Len, desc="The length of the request";
MessageSizeType MessageSize, desc="size category of the message";
}
structure(DMAResponseMsg, desc="...", interface="NetworkMessage") {
DMAResponseType Type, desc="Response type (DATA/ACK)";
Address PhysicalAddress, desc="Physical address for this request";
Address LineAddress, desc="Line address for this request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
MessageSizeType MessageSize, desc="size category of the message";
}
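// Similarly (a sketch; the port name, latency, and "len" are assumptions),
// a DMA sequencer would issue a line read with:
//
//   enqueue(reqToDirectory_out, DMARequestMsg, latency="1") {
//     out_msg.PhysicalAddress := address;
//     out_msg.LineAddress := makeLineAddress(address);
//     out_msg.Type := DMARequestType:READ;
//     out_msg.Destination.add(map_Address_to_Directory(address));
//     out_msg.Len := len;
//     out_msg.MessageSize := MessageSizeType:Request_Control;
//   }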

View File

@ -0,0 +1,5 @@
protocol "Network_test";
include "RubySlicc_interfaces.slicc";
include "Network_test-msg.sm";
include "Network_test-cache.sm";
include "Network_test-dir.sm";

View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Mapping functions
int machineCount(MachineType machType);
MachineID mapAddressToRange(Address addr, MachineType type, int low, int high);
NetDest broadcast(MachineType type);
MachineID map_Address_to_DMA(Address addr);
MachineID map_Address_to_Directory(Address addr);
NodeID map_Address_to_DirectoryNode(Address addr);
NodeID machineIDToNodeID(MachineID machID);
NodeID machineIDToVersion(MachineID machID);
MachineType machineIDToMachineType(MachineID machID);
GenericMachineType ConvertMachToGenericMach(MachineType machType);
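// Usage sketch (hedged; the parameter names below are illustrative):
// protocols with banked L2s derive the destination bank from a fixed
// field of address bits, e.g.
//
//   out_msg.Destination.add(mapAddressToRange(address,
//       MachineType:L2Cache, l2_select_low_bit, l2_select_num_bits));
//
// which is also why the number of machines of each type must be a
// power of 2.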

View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Hack: no per-node objects are declared here since the base class already has them
NodeID id, no_chip_object="yes", no_vector="yes", abstract_chip_ptr="true";
NodeID version, no_chip_object="yes", no_vector="yes", abstract_chip_ptr="true";
MachineID machineID, no_chip_object="yes", no_vector="yes", abstract_chip_ptr="true";

View File

@ -0,0 +1,347 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
// defines
external_type(int, primitive="yes", default="0");
external_type(bool, primitive="yes", default="false");
external_type(std::string, primitive="yes");
external_type(uint64, primitive="yes");
external_type(Time, primitive="yes", default="0");
external_type(Address);
structure(DataBlock, external = "yes", desc="..."){
void clear();
void copyPartial(DataBlock, int, int);
}
// Declarations of external types that are common to all protocols
// AccessPermission
// The following states define the access permission of all memory blocks.
// These permissions have multiple uses. They coordinate locking and
// synchronization primitives, as well as enable functional accesses.
// One should not need to add any additional permission values and it is very
// risky to do so.
enumeration(AccessPermission, desc="...", default="AccessPermission_NotPresent") {
// Valid data
Read_Only, desc="block is Read Only (modulo functional writes)";
Read_Write, desc="block is Read/Write";
// Possibly Invalid data
// The Maybe_Stale permission indicates that, according to the protocol,
// there is no guarantee the block contains valid data. However, functional
// writes should update the block because a dataless PUT request may
// revalidate the block's data.
Maybe_Stale, desc="block can be stale or revalidated by a dataless PUT";
// In Broadcast/Snoop protocols, memory does not know whether it is the
// exclusive owner of a block, which makes it hard to enforce the invariant
// that only one read_write copy of a block exists in the system. This
// permission lets the memory say "I have the block" and lets the RubyPort
// logic know that this is a last-resort copy to use if there are no
// writable copies in the caching hierarchy. It is not meant for directory
// or token protocols, where memory/NB knows what is going on in the whole
// system.
Backing_Store, desc="for memory in Broadcast/Snoop protocols";
// Invalid data
Invalid, desc="block is in an Invalid base state";
NotPresent, desc="block is NotPresent";
Busy, desc="block is in a transient state, currently invalid";
}
// TesterStatus
enumeration(TesterStatus, desc="...") {
Idle, desc="Idle";
Action_Pending, desc="Action Pending";
Ready, desc="Ready";
Check_Pending, desc="Check Pending";
}
// InvalidateGeneratorStatus
enumeration(InvalidateGeneratorStatus, desc="...") {
Load_Waiting, desc="Load waiting to be issued";
Load_Pending, desc="Load issued";
Inv_Waiting, desc="Store (invalidate) waiting to be issued";
Inv_Pending, desc="Store (invalidate) issued";
}
// SeriesRequestGeneratorStatus
enumeration(SeriesRequestGeneratorStatus, desc="...") {
Thinking, desc="Doing work before next action";
Request_Pending, desc="Request pending";
}
// LockStatus
enumeration(LockStatus, desc="...") {
Unlocked, desc="Lock is not held";
Locked, desc="Lock is held";
}
// SequencerStatus
enumeration(SequencerStatus, desc="...") {
Idle, desc="Idle";
Pending, desc="Pending";
}
enumeration(TransitionResult, desc="...") {
Valid, desc="Valid transition";
ResourceStall, desc="Stalled due to insufficient resources";
ProtocolStall, desc="Protocol specified stall";
}
// RubyRequestType
enumeration(RubyRequestType, desc="...", default="RubyRequestType_NULL") {
LD, desc="Load";
ST, desc="Store";
ATOMIC, desc="Atomic Load/Store";
IFETCH, desc="Instruction fetch";
IO, desc="I/O";
REPLACEMENT, desc="Replacement";
Load_Linked, desc="Load linked";
Store_Conditional, desc="Store conditional";
RMW_Read, desc="Read phase of a read-modify-write";
RMW_Write, desc="Write phase of a read-modify-write";
Locked_RMW_Read, desc="Read phase of a locked read-modify-write";
Locked_RMW_Write, desc="Write phase of a locked read-modify-write";
COMMIT, desc="Commit version";
NULL, desc="Invalid request type";
FLUSH, desc="Flush request type";
}
enumeration(SequencerRequestType, desc="...", default="SequencerRequestType_NULL") {
LD, desc="Load";
ST, desc="Store";
NULL, desc="Invalid request type";
}
enumeration(GenericRequestType, desc="...", default="GenericRequestType_NULL") {
GETS, desc="gets request";
GET_INSTR, desc="get instr request";
GETX, desc="getx request";
UPGRADE, desc="upgrade request";
DOWNGRADE, desc="downgrade request";
INV, desc="invalidate request";
INV_S, desc="invalidate shared copy request";
PUTS, desc="puts request";
PUTO, desc="puto request";
PUTX, desc="putx request";
L2_PF, desc="L2 prefetch";
LD, desc="Load";
ST, desc="Store";
ATOMIC, desc="Atomic Load/Store";
IFETCH, desc="Instruction fetch";
IO, desc="I/O";
NACK, desc="Nack";
REPLACEMENT, desc="Replacement";
WB_ACK, desc="WriteBack ack";
EXE_ACK, desc="Exclusive ack";
COMMIT, desc="Commit version";
LD_XACT, desc="Transactional Load";
LDX_XACT, desc="Transactional Load-Intend-Modify";
ST_XACT, desc="Transactional Store";
BEGIN_XACT, desc="Begin Transaction";
COMMIT_XACT, desc="Commit Transaction";
ABORT_XACT, desc="Abort Transaction";
DMA_READ, desc="DMA READ";
DMA_WRITE, desc="DMA WRITE";
NULL, desc="null request type";
}
enumeration(GenericMachineType, desc="...", default="GenericMachineType_NULL") {
L1Cache, desc="L1 Cache Mach";
L2Cache, desc="L2 Cache Mach";
L3Cache, desc="L3 Cache Mach";
Directory, desc="Directory Mach";
DMA, desc="DMA Mach";
Collector, desc="Collector Mach";
L1Cache_wCC, desc="L1 Cache Mach with Cache Coherence (used for miss latency profile)";
L2Cache_wCC, desc="L2 Cache Mach with Cache Coherence (used for miss latency profile)";
NULL, desc="null mach type";
}
// MessageSizeType
enumeration(MessageSizeType, default="MessageSizeType_Undefined", desc="...") {
Undefined, desc="Undefined";
Control, desc="Control Message";
Data, desc="Data Message";
Request_Control, desc="Request";
Reissue_Control, desc="Reissued request";
Response_Data, desc="data response";
ResponseL2hit_Data, desc="data response from an L2 hit";
ResponseLocal_Data, desc="data response from a local cache";
Response_Control, desc="non-data response";
Writeback_Data, desc="Writeback data";
Writeback_Control, desc="Writeback control";
Broadcast_Control, desc="Broadcast control";
Multicast_Control, desc="Multicast control";
Forwarded_Control, desc="Forwarded control";
Invalidate_Control, desc="Invalidate control";
Unblock_Control, desc="Unblock control";
Persistent_Control, desc="Persistent request activation messages";
Completion_Control, desc="Completion messages";
}
// AccessType
enumeration(AccessType, desc="...") {
Read, desc="Reading from cache";
Write, desc="Writing to cache";
}
// RubyAccessMode
enumeration(RubyAccessMode, default="RubyAccessMode_User", desc="...") {
Supervisor, desc="Supervisor mode";
User, desc="User mode";
Device, desc="Device mode";
}
enumeration(PrefetchBit, default="PrefetchBit_No", desc="...") {
No, desc="No, not a prefetch";
Yes, desc="Yes, a prefetch";
L1_HW, desc="This is a L1 hardware prefetch";
L2_HW, desc="This is a L2 hardware prefetch";
}
// CacheMsg
structure(SequencerMsg, desc="...", interface="Message") {
Address LineAddress, desc="Line address for this request";
Address PhysicalAddress, desc="Physical address for this request";
SequencerRequestType Type, desc="Type of request (LD, ST, etc)";
Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
RubyAccessMode AccessMode, desc="user/supervisor access type";
DataBlock DataBlk, desc="Data";
int Len, desc="size in bytes of access";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
// MaskPredictorType
enumeration(MaskPredictorType, default="MaskPredictorType_Undefined", desc="...") {
Undefined, desc="Undefined";
AlwaysUnicast, desc="AlwaysUnicast";
TokenD, desc="TokenD";
AlwaysBroadcast, desc="AlwaysBroadcast";
TokenB, desc="TokenB";
TokenNull, desc="TokenNull";
Random, desc="Random";
Pairwise, desc="Pairwise";
Owner, desc="Owner";
BroadcastIfShared, desc="Broadcast-If-Shared";
BroadcastCounter, desc="Broadcast Counter";
Group, desc="Group";
Counter, desc="Counter";
StickySpatial, desc="StickySpatial";
OwnerBroadcast, desc="Owner/Broadcast Hybrid";
OwnerGroup, desc="Owner/Group Hybrid";
OwnerBroadcastMod, desc="Owner/Broadcast Hybrid-Mod";
OwnerGroupMod, desc="Owner/Group Hybrid-Mod";
LastNMasks, desc="Last N Masks";
BandwidthAdaptive, desc="Bandwidth Adaptive";
}
// MaskPredictorIndex
enumeration(MaskPredictorIndex, default="MaskPredictorIndex_Undefined", desc="...") {
Undefined, desc="Undefined";
DataBlock, desc="Data Block";
PC, desc="Program Counter";
}
// MaskPredictorTraining
enumeration(MaskPredictorTraining, default="MaskPredictorTraining_Undefined", desc="...") {
Undefined, desc="Undefined";
None, desc="None";
Implicit, desc="Implicit";
Explicit, desc="Explicit";
Both, desc="Both";
}
// Network Topologies
enumeration(TopologyType, desc="...") {
CROSSBAR, desc="One node per chip, single switch crossbar";
HIERARCHICAL_SWITCH, desc="One node per chip, totally ordered hierarchical tree switched network";
TORUS_2D, desc="One node per chip, 2D torus";
PT_TO_PT, desc="One node per chip, Point to Point Network";
FILE_SPECIFIED, desc="described by the file NETWORK_FILE";
}
// DNUCA AllocationStrategy
enumeration(AllocationStrategy, desc="...") {
InMiddle, desc="";
InInvCorners, desc="";
InSharedSides, desc="";
StaticDist, desc="";
RandomBank, desc="";
FrequencyBank, desc="";
FrequencyBlock, desc="";
LRUBlock, desc="";
}
// DNUCA SearchMechanism
enumeration(SearchMechanism, desc="...") {
Perfect, desc="";
PartialTag, desc="";
BloomFilter, desc="";
Random, desc="";
None, desc="";
}
// DNUCA link type
enumeration(LinkType, desc="...") {
RC_1500UM, desc="";
RC_2500UM, desc="";
TL_9000UM, desc="";
TL_11000UM, desc="";
TL_13000UM, desc="";
NO_ENERGY, desc="";
NULL, desc="";
}
// transient request type
enumeration(TransientRequestType, desc="...", default="TransientRequestType_Undefined") {
Undefined, desc="";
OffChip, desc="";
OnChip, desc="";
LocalTransient, desc="";
}
// Request Status
enumeration(RequestStatus, desc="...", default="RequestStatus_NULL") {
Ready, desc="The sequencer is ready and the request does not alias";
Issued, desc="The sequencer successfully issued the request";
BufferFull, desc="Cannot issue because the sequencer is full";
Aliased, desc="This request aliased with a currently outstanding request";
NULL, desc="";
}
// LinkDirection
enumeration(LinkDirection, desc="...") {
In, desc="Inward link direction";
Out, desc="Outward link direction";
}

View File

@ -0,0 +1,67 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
// MemoryRequestType used in MemoryMsg
enumeration(MemoryRequestType, desc="...") {
// Southbound requests: directory to memory cache, directory to memory,
// or memory cache to memory
MEMORY_READ, desc="Read request to memory";
MEMORY_WB, desc="Write back data to memory";
// response from memory to directory
// (These are currently unused!)
MEMORY_DATA, desc="Data read from memory";
MEMORY_ACK, desc="Write to memory acknowledgement";
}
// Message to and from Memory Control
structure(MemoryMsg, desc="...", interface="Message") {
Address Address, desc="Physical address for this request";
MemoryRequestType Type, desc="Type of memory request (MEMORY_READ or MEMORY_WB)";
MachineID Sender, desc="What component sent the data";
MachineID OriginalRequestorMachId, desc="What component originally requested";
DataBlock DataBlk, desc="Data to writeback";
MessageSizeType MessageSize, desc="size category of the message";
// Not all fields used by all protocols:
PrefetchBit Prefetch, desc="Is this a prefetch request";
bool ReadX, desc="Exclusive";
int Acks, desc="How many acks to expect";
}
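// A directory controller would fill this in roughly as follows (a sketch;
// the out_port name and latency are assumptions):
//
//   enqueue(memQueue_out, MemoryMsg, latency="1") {
//     out_msg.Address := address;
//     out_msg.Type := MemoryRequestType:MEMORY_READ;
//     out_msg.Sender := machineID;
//     out_msg.OriginalRequestorMachId := in_msg.Requestor;
//     out_msg.MessageSize := MessageSizeType:Request_Control;
//   }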

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Profiler function
void profileStore(NodeID node, bool needCLB);
void profileCacheCLBsize(int size, int numStaleI);
void profileMemoryCLBsize(int size, int numStaleI);
// used by 2-level exclusive cache protocols
void profile_miss(RubyRequest msg);
// used by non-fast path protocols
void profile_L1Cache_miss(RubyRequest msg, NodeID l1cacheID);
// used by CMP protocols
void profile_request(std::string L1CacheStateStr, std::string L2CacheStateStr,
std::string directoryStateStr, std::string requestTypeStr);
void profileMessageReordering(bool wasReordered);
void profileMessageReorderingByNetwork(int vnet, bool wasReordered);
void profile_token_retry(Address addr, AccessType type, int count);
void profile_persistent_prediction(Address addr, AccessType type);
void profile_filter_action(int act);
void profile_multicast_retry(Address addr, int count);
void profile_outstanding_request(int outstanding);
void profile_outstanding_persistent_request(int outstanding);
// void profile_overlapping_persistent_request(int overlapping);
void profile_average_latency_estimate(int latency);
// profile the total message delay of a message across a virtual network
void profileMsgDelay(int virtualNetwork, int delayCycles);
// used by transactional-memory protocols
void profile_transaction(int numStores);
void profile_trans_wb();
void profileOverflow(Address addr, MachineID mach);

View File

@ -0,0 +1,186 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// External Types
//
// **PLEASE NOTE!** When adding objects to this file you must also add a line
// in the src/mem/ruby/SConscript file. Otherwise the external object's .hh
// file will not be copied to the protocol directory and you will encounter
// an undefined declaration error.
//
external_type(MessageBuffer, buffer="yes", inport="yes", outport="yes");
external_type(OutPort, primitive="yes");
structure(InPort, external = "yes", primitive="yes") {
bool isReady();
void dequeue();
int dequeue_getDelayCycles();
void recycle();
bool isEmpty();
}
external_type(NodeID, default="0", primitive="yes");
external_type(MachineID);
MessageBuffer getMandatoryQueue(int core_id);
structure (Set, external = "yes", non_obj="yes") {
void setSize(int);
void add(NodeID);
void addSet(Set);
void remove(NodeID);
void removeSet(Set);
void broadcast();
void addRandom();
void clear();
int count();
bool isElement(NodeID);
bool isEqual(Set);
bool isSuperset(Set);
bool intersectionIsEmpty(Set);
NodeID smallestElement();
}
structure (NetDest, external = "yes", non_obj="yes") {
void setSize(int);
void setSize(int, int);
void add(NodeID);
void add(MachineID);
void addSet(Set);
void addNetDest(NetDest);
void setNetDest(MachineType, Set);
void remove(NodeID);
void remove(MachineID);
void removeSet(Set);
void removeNetDest(NetDest);
void broadcast();
void broadcast(MachineType);
void addRandom();
void clear();
Set toSet();
int count();
bool isElement(NodeID);
bool isElement(MachineID);
bool isSuperset(Set);
bool isSuperset(NetDest);
bool isEmpty();
bool intersectionIsEmpty(Set);
bool intersectionIsEmpty(NetDest);
MachineID smallestElement(MachineType);
}
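// Sketch of the broadcast-minus-self pattern that snoop protocols build
// on this interface (illustrative only):
//
//   out_msg.Destination.broadcast(MachineType:L1Cache);
//   out_msg.Destination.remove(machineID);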
structure (Sequencer, external = "yes") {
void readCallback(Address, DataBlock);
void readCallback(Address, GenericMachineType, DataBlock);
void readCallback(Address, GenericMachineType, DataBlock, Time, Time, Time);
void writeCallback(Address, DataBlock);
void writeCallback(Address, GenericMachineType, DataBlock);
void writeCallback(Address, GenericMachineType, DataBlock, Time, Time, Time);
void checkCoherence(Address);
void profileNack(Address, int, int, uint64);
void evictionCallback(Address);
}
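// A cache controller reports hit/fill completion to the core through
// these callbacks, e.g. (a sketch; the action name and "cache_entry"
// are assumptions):
//
//   action(h_load_hit, "h", desc="Notify sequencer the load completed") {
//     sequencer.readCallback(address, cache_entry.DataBlk);
//   }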
structure(RubyRequest, desc="...", interface="Message", external="yes") {
Address LineAddress, desc="Line address for this request";
Address PhysicalAddress, desc="Physical address for this request";
RubyRequestType Type, desc="Type of request (LD, ST, etc)";
Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
RubyAccessMode AccessMode, desc="user/supervisor access type";
int Size, desc="size in bytes of access";
PrefetchBit Prefetch, desc="Is this a prefetch request";
int contextId, desc="this goes away but must be replaced with Nilay";
}
structure(AbstractEntry, primitive="yes", external = "yes") {
void changePermission(AccessPermission);
}
structure (DirectoryMemory, external = "yes") {
AbstractEntry allocate(Address, AbstractEntry);
AbstractEntry lookup(Address);
bool isPresent(Address);
void invalidateBlock(Address);
}
structure(AbstractCacheEntry, primitive="yes", external = "yes") {
void changePermission(AccessPermission);
}
structure (CacheMemory, external = "yes") {
bool cacheAvail(Address);
Address cacheProbe(Address);
AbstractCacheEntry allocate(Address, AbstractCacheEntry);
void allocateVoid(Address, AbstractCacheEntry);
void deallocate(Address);
AbstractCacheEntry lookup(Address);
bool isTagPresent(Address);
void profileMiss(RubyRequest);
void profileGenericRequest(GenericRequestType,
RubyAccessMode,
PrefetchBit);
void setMRU(Address);
}
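// The usual allocate-or-replace idiom built on this interface (a sketch;
// the cache name and the event name are assumptions):
//
//   if (L1DcacheMemory.cacheAvail(address)) {
//     // a way is free in this set; allocate and continue the miss
//   } else {
//     // first evict the victim chosen by the replacement policy
//     trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(address));
//   }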
structure (WireBuffer, inport="yes", outport="yes", external = "yes") {
}
structure (MemoryControl, inport="yes", outport="yes", external = "yes") {
}
structure (DMASequencer, external = "yes") {
void ackCallback();
void dataCallback(DataBlock);
}
structure (TimerTable, inport="yes", external = "yes") {
bool isReady();
Address readyAddress();
void set(Address, int);
void unset(Address);
bool isSet(Address);
}
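// Protocols with timeouts poll this table from an in_port, e.g. (a
// sketch modeled on the timer-based protocols; names are assumptions):
//
//   in_port(useTimerTable_in, Address, useTimerTable) {
//     if (useTimerTable_in.isReady()) {
//       trigger(Event:Use_Timeout, useTimerTable.readyAddress());
//     }
//   }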
structure (GenericBloomFilter, external = "yes") {
void clear(int);
void increment(Address, int);
void decrement(Address, int);
void set(Address, int);
void unset(Address, int);
bool isSet(Address, int);
int getCount(Address, int);
}

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Miscellaneous Functions
void error(std::string msg);
void assert(bool condition);
int random(int number);
Time get_time();
Time zero_time();
NodeID intToID(int nodenum);
int IDToInt(NodeID id);
int addressToInt(Address addr);
bool multicast_retry();
int numberOfNodes();
int numberOfL1CachePerChip();
int getAddThenMod(int addend1, int addend2, int modulus);
int time_to_int(Time time);
Time getTimeModInt(Time time, int modulus);
Time getTimePlusInt(Time addend1, int addend2);
Time getTimeMinusTime(Time t1, Time t2);
Time getPreviousDelayedCycles(Time t1, Time t2);
void procProfileCoherenceRequest(NodeID node, bool needCLB);
void dirProfileCoherenceRequest(NodeID node, bool needCLB);
bool isPerfectProtocol();
bool L1trainsPrefetcher();
int max_tokens();
bool distributedPersistentEnabled();
Address setOffset(Address addr, int offset);
Address makeLineAddress(Address addr);
int addressOffset(Address addr);
int mod(int val, int mod);

View File

@ -0,0 +1,7 @@
include "RubySlicc_Exports.sm";
include "RubySlicc_Types.sm";
include "RubySlicc_Util.sm";
include "RubySlicc_ComponentMapping.sm";
include "RubySlicc_Profiler.sm";
include "RubySlicc_Defines.sm";
include "RubySlicc_MemControl.sm";

View File

@ -0,0 +1,104 @@
# -*- mode:python -*-
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os
import re
import sys
from os.path import isdir, isfile, join as joinpath
from SCons.Scanner import Classic
Import('*')
if env['PROTOCOL'] == 'None':
Return()
protocol_dir = Dir('.')
html_dir = Dir('html')
slicc_dir = Dir('../slicc')
sys.path[1:1] = [ Dir('..').srcnode().abspath ]
from slicc.parser import SLICC
slicc_depends = []
for root,dirs,files in os.walk(slicc_dir.srcnode().abspath):
for f in files:
if f.endswith('.py'):
slicc_depends.append(File(joinpath(root, f)))
#
# Use SLICC
#
env['SLICC_PATH'] = str(protocol_dir)
slicc_scanner = Classic("SliccScanner", ['.sm', '.slicc'], "SLICC_PATH",
r'''include[ \t]["'](.*)["'];''')
env.Append(SCANNERS=slicc_scanner)
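# The emitter below runs SLICC once while SCons computes dependencies, so
# the full list of generated .cc/.hh/.py targets is known up front; the
# action then re-runs SLICC (verbosely) at build time to write the files.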
def slicc_emitter(target, source, env):
assert len(source) == 1
filepath = source[0].srcnode().abspath
slicc = SLICC(filepath, verbose=False)
slicc.process()
slicc.writeCodeFiles(protocol_dir.abspath)
if env['SLICC_HTML']:
slicc.writeHTMLFiles(html_dir.abspath)
target.extend([protocol_dir.File(f) for f in sorted(slicc.files())])
return target, source
def slicc_action(target, source, env):
assert len(source) == 1
filepath = source[0].srcnode().abspath
slicc = SLICC(filepath, verbose=True)
slicc.process()
slicc.writeCodeFiles(protocol_dir.abspath)
if env['SLICC_HTML']:
slicc.writeHTMLFiles(html_dir.abspath)
slicc_builder = Builder(action=MakeAction(slicc_action, Transform("SLICC")),
emitter=slicc_emitter)
protocol = env['PROTOCOL']
sources = [ protocol_dir.File("%s.slicc" % protocol) ]
env.Append(BUILDERS={'SLICC' : slicc_builder})
nodes = env.SLICC([], sources)
env.Depends(nodes, slicc_depends)
for f in nodes:
s = str(f)
if s.endswith('.cc'):
Source(f)
elif s.endswith('.py'):
SimObject(f)

View File

@ -0,0 +1,52 @@
# -*- mode:python -*-
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os
Import('*')
all_protocols = [
'MESI_CMP_directory',
'MI_example',
'MOESI_CMP_directory',
'MOESI_CMP_token',
'MOESI_hammer',
'Network_test',
'None'
]
opt = EnumVariable('PROTOCOL', 'Coherence protocol for Ruby', 'None',
all_protocols)
sticky_vars.AddVariables(opt)
export_vars += ['PROTOCOL']
opt = BoolVariable('SLICC_HTML', 'Create HTML files', False)
sticky_vars.AddVariables(opt)

View File

@ -0,0 +1,49 @@
MSI_dir_L1_MOSI_dir_L2_SNUCA_CMP
---------------------------------------
CMP System structure:
A CMP (Chip MultiProcessor) system is composed of one or more CMP chips. A CMP chip consists of some number of processors, each with private L1 I+D caches, and one shared L2 cache. The shared L2 cache can be sub-divided into banks, where each bank has a separate cache controller. One global interconnect connects all the components in the system, including the L1 caches, the L2 cache banks, and the memories/directories.
High-level Protocol Description:
- The L1 has 3 stable states: M, S, I
- L2 maintains strict inclusion and has full knowledge of on-chip L1 sharers via individual bits in the cache tag. The stable states are below:
M,O,S,I : normal meanings
SS : L2 entry shared, also present in one or more L1s or ext L2s
SO : L2 entry owned, also present in one or more L1s or ext L2s
MT : L2 entry modified in a local L1. L2 copy is stale
- The protocol has a strict on-chip hierarchy where the L1 caches only communicate with their on-chip L2 cache, while off-chip the L2 cache communicates with the directory as well as directly responding to the other L2 caches.
High Level Event Description:
On a L1 GETS miss -
> The L2 controller will satisfy the miss immediately if in M, S, SS, or SO.
> If the L2 controller is in MT, the L2 controller will issue a downgrade request to the on-chip L1 sharer. The L1 sharer will then downgrade to S and copy its dirty block to the L2. The L2 will then forward that copy to the L1 GETS requestor.
> If not present, the L2 will issue a GETS to the directory. The directory will either forward the request, or respond with data. If forwarded to another L2 controller, the L2 owner will respond with data directly to the L2 requestor which would then respond to the original L1 requestor.
On a L1 GETX miss -
> The L2 controller will satisfy the miss immediately if in M
> If the L2 controller is in MT, the L2 controller will issue an invalidate request to the on-chip L1 sharer. The L1 sharer will then invalidate the block and write back its dirty block to the L2. The L2 will then forward that copy to the L1 GETX requestor (see the sketch after this list).
> If the L2 controller is in O, S, or I, the L2 controller will issue a GETX request to the directory. The L2 will wait to receive the data and the necessary number of acks before responding to the L1 GETX requestor with the data and exclusive permission.
> If the L2 controller is in SS or SO, the L2 controller will issue a GETX request to the directory as well as invalidate requests to the on-chip L1 copies. The L2 will wait to receive the data and the necessary number of acks (both on and off chip) before responding to the L1 GETX requestor with the data and exclusive permission.
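As a sketch, the MT flow above looks roughly like this in SLICC terms (the state, event, and action names here are illustrative, not the protocol's actual identifiers):
  transition(MT, L1_GETX, MT_MB) {
    f_invalidateOwnerL1;         // forward an INV to the on-chip L1 owner
  }
  transition(MT_MB, WB_Data, MT) {
    w_writeOwnerDataToL2;        // capture the owner's dirty block
    d_sendDataToGETXRequestor;   // data + exclusive permission to the new owner
  }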
Other minute details:
The L2 acknowledges all L1 replacements
The L1 acknowledges L2 invalidation requests
The L2 sends an ack to the directory on a 3-hop transfer
The forward request network (Dir -> L2 -> L1) is assumed to be pt-to-pt ordered
The L1 request network (L1 -> L2) is assumed to be pt-to-pt ordered
All other networks are unordered
Ruby Implementation Details:
- The new component network must be used with this protocol
- Each ruby node contains an L1 cache controller, 0 or more L2 cache controllers, and 0 or more directories. There must be at least one directory in the system and at least one L2 cache controller per chip. The directories are statically partitioned by address across the entire system (SMP), while the L2Cache banks are statically partitioned by address across a single chip. The number of machines of each type must be a power of 2.
- BUG ALERT: There is a single pool of TBE Entries for the L2 Controller. Therefore it is possible to encounter a resource deadlock for these entries between satisfying L1 requests and directory requests.
