From 280fa4d99e3d2dfbc3e8d4f006b62e4b78538677 Mon Sep 17 00:00:00 2001
From: David Li
Date: Mon, 28 Dec 2015 20:43:12 -0700
Subject: Specialize to direct-mapped cache
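
Replace the generic, set-associative Cache with a DirectMappedCache that
keeps a single CacheBlock per set, dropping num_ways and the CacheSet
alias. read_word now takes &mut self so that a miss can record a
FetchRequest for the containing block: normalize_address masks off the
block offset so the fetch starts at the block boundary, and a miss on a
block that is already being fetched reports the remaining stall cycles
instead of issuing a new request.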
---
src/memory.rs | 56 +++++++++++++++++++++++++++++++++++---------------------
1 file changed, 35 insertions(+), 21 deletions(-)
diff --git a/src/memory.rs b/src/memory.rs
index a8b15e2..c3cc60a 100644
--- a/src/memory.rs
+++ b/src/memory.rs
@@ -30,7 +30,7 @@ pub type Result<T> = ::std::result::Result<T, MemoryError>;
pub trait MemoryInterface {
fn latency() -> u32;
- fn read_word(&self, address: isa::Address) -> Result<isa::Word>;
+ fn read_word(&mut self, address: isa::Address) -> Result<isa::Word>;
fn write_word(&mut self, address: isa::Address, value: isa::Word) -> Result<()>;
// fn read_halfword(&self, address: isa::Address) -> Result;
@@ -63,17 +63,14 @@ struct CacheBlock {
fetch_request: Option<FetchRequest>,
}
-type CacheSet = Vec<CacheBlock>;
-
// TODO: probably want different caches for different strategies, and
// investigate how LRU is implemented
// TODO: use hashtable for a way?
// TODO: hashtable-based FA cache?
-pub struct Cache<T: MemoryInterface> {
+pub struct DirectMappedCache<T: MemoryInterface> {
num_sets: u32,
- num_ways: u32,
block_words: u32,
- cache: Vec<CacheSet>,
+ cache: Vec<CacheBlock>,
next_level: T,
}
@@ -102,7 +99,7 @@ impl MemoryInterface for Memory {
100
}
- fn read_word(&self, address: isa::Address) -> Result<isa::Word> {
+ fn read_word(&mut self, address: isa::Address) -> Result<isa::Word> {
// memory is word-addressed but addresses are byte-addressed
self.memory.get((address / 4) as usize)
.map(Clone::clone)
@@ -122,17 +119,17 @@ impl MemoryInterface for Memory {
}
}
-impl<T: MemoryInterface> Cache<T> {
- pub fn new(sets: u32, ways: u32, block_words: u32, next_level: T) -> Cache<T> {
- let set = vec![CacheBlock {
+impl<T: MemoryInterface> DirectMappedCache<T> {
+ pub fn new(sets: u32, block_words: u32, next_level: T)
+ -> DirectMappedCache<T> {
+ let set = CacheBlock {
valid: false,
tag: 0,
contents: vec![0; block_words as usize],
fetch_request: None,
- }; ways as usize];
- Cache {
+ };
+ DirectMappedCache {
num_sets: sets,
- num_ways: ways,
block_words: block_words,
cache: vec![set; sets as usize],
next_level: next_level,
@@ -152,6 +149,11 @@ impl<T: MemoryInterface> Cache<T> {
(tag, index, offset)
}
+ fn normalize_address(&self, address: isa::Address) -> isa::Address {
+ let offset_mask = !(self.block_words * 4 - 1);
+ address & offset_mask
+ }
+
pub fn prefetch(&mut self, address: isa::Address) {
}
@@ -161,21 +163,33 @@ impl<T: MemoryInterface> Cache<T> {
}
}
-impl<T: MemoryInterface> MemoryInterface for Cache<T> {
+impl<T: MemoryInterface> MemoryInterface for DirectMappedCache<T> {
fn latency() -> u32 {
100
}
- fn read_word(&self, address: isa::Address) -> Result<isa::Word> {
+ fn read_word(&mut self, address: isa::Address) -> Result<isa::Word> {
+ let normalized = self.normalize_address(address);
let (tag, index, offset) = self.parse_address(address);
- let ref set = self.cache[index as usize];
- for way in set {
- if way.tag == tag {
- return Ok(way.contents[(offset / 4) as usize]);
- }
+ let ref mut set = self.cache[index as usize];
+ let stall = DirectMappedCache::<T>::latency() + T::latency();
+ if set.valid && set.tag == tag {
+ return Ok(set.contents[(offset / 4) as usize]);
+ }
+ else if let None = set.fetch_request {
+ set.fetch_request = Some(FetchRequest {
+ address: normalized,
+ prefetch: false,
+ cycles_left: stall,
+ })
+ }
+ else if let Some(ref fetch_request) = set.fetch_request {
+ return Err(MemoryError::CacheMiss {
+ stall_cycles: fetch_request.cycles_left
+ });
}
Err(MemoryError::CacheMiss {
- stall_cycles: Cache::<T>::latency() + T::latency()
+ stall_cycles: stall,
})
}
--
cgit v1.2.3
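
For reference, here is a standalone sketch (not part of the patch) of the
offset masking that normalize_address performs. The block size and address
below are illustrative assumptions: with block_words = 4 a block spans 16
bytes, so the low four address bits select a byte within the block and are
cleared before the fetch request is recorded.

    // Illustrative only: mirrors the `!(block_words * 4 - 1)` mask from
    // normalize_address, assuming block_words = 4 (16-byte blocks).
    fn main() {
        let block_words: u32 = 4;
        let offset_mask = !(block_words * 4 - 1); // !0xF == 0xFFFF_FFF0
        let address: u32 = 0x1234_567A;
        // The fetch is issued for the start of the enclosing block.
        assert_eq!(address & offset_mask, 0x1234_5670);
        println!("{:#010x}", address & offset_mask);
    }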