Skip to content

Commit 33086c7

Browse files
committed
fix bugs, inconsistencies and add more docs.
1 parent 99bdf37 commit 33086c7

10 files changed

Lines changed: 552 additions & 311 deletions

File tree

src/mem/pfa/bitset.rs

Lines changed: 10 additions & 178 deletions
Original file line number | Diff line number | Diff line change
@@ -5,17 +5,18 @@ use hal::mem::PhysAddr;
55

66
use crate::{
77
error::Result,
8-
types::boxed::{self, Box},
8+
types::{
9+
bitset::BitAlloc,
10+
boxed::{self, Box},
11+
},
912
};
1013

1114
pub struct Allocator<const N: usize> {
1215
begin: PhysAddr,
13-
l1: [usize; N],
16+
bitalloc: BitAlloc<N>,
1417
}
1518

1619
impl<const N: usize> Allocator<N> {
17-
const BITS_PER_WORD: usize = usize::BITS as usize;
18-
1920
pub fn new(begin: PhysAddr) -> Option<Self> {
2021
if !begin.is_multiple_of(super::PAGE_SIZE) {
2122
return None;
@@ -27,7 +28,7 @@ impl<const N: usize> Allocator<N> {
2728

2829
Some(Self {
2930
begin,
30-
l1: [!0; N], // All bits are set to 1, meaning all pages are free.
31+
bitalloc: BitAlloc::new(N * BitAlloc::<N>::BITS_PER_WORD)?,
3132
})
3233
}
3334
}
@@ -65,184 +66,15 @@ impl<const N: usize> super::Allocator<N> for Allocator<N> {
6566
}
6667

6768
fn alloc(&mut self, page_count: usize) -> Option<PhysAddr> {
68-
// If a bit is 1 the page is free. If a bit is 0 the page is allocated.
69-
let mut start = 0;
70-
let mut len = 0usize;
71-
72-
let rem = page_count.saturating_sub(Self::BITS_PER_WORD);
73-
let mask = (!0usize).unbounded_shl((Self::BITS_PER_WORD.saturating_sub(page_count)) as u32);
74-
75-
for idx in 0..N {
76-
if self.l1[idx] == 0 {
77-
len = 0;
78-
continue;
79-
}
80-
81-
let mut byte = self.l1[idx];
82-
83-
let mut shift = if len > 0 {
84-
0usize
85-
} else {
86-
byte.leading_zeros() as usize
87-
};
88-
89-
byte <<= shift;
90-
91-
while shift < Self::BITS_PER_WORD {
92-
// Make the mask smaller if we already have some contiguous bits.
93-
let mask = if rem.saturating_sub(len) == 0 {
94-
mask << (len - rem)
95-
} else {
96-
mask
97-
};
98-
99-
// We shifted byte to MSB, mask is already aligned to the left.
100-
// We compare them via and and shift to the right to shift out extra bits from the mask that would overflow into the next word.
101-
let mut found = (byte & mask) >> shift;
102-
103-
// We also need to shift the mask to the right so that we can compare mask and found.
104-
if found == (mask >> shift) {
105-
if len == 0 {
106-
start = idx * Self::BITS_PER_WORD + shift;
107-
}
108-
109-
// Shift completely to the right.
110-
found >>= found.trailing_zeros();
111-
112-
// As all found bits are now on the right we can just count them to get the amount we found.
113-
len += found.trailing_ones() as usize;
114-
// Continue to the next word if we haven't found enough bits yet.
115-
break;
116-
} else {
117-
len = 0;
118-
}
119-
120-
shift += 1;
121-
byte <<= 1;
122-
}
123-
124-
if len >= page_count {
125-
// Mark the allocated pages as used.
126-
let mut idx = start / Self::BITS_PER_WORD;
127-
128-
// Mark all bits in the first word as used.
129-
{
130-
let skip = start % Self::BITS_PER_WORD;
131-
let rem = (Self::BITS_PER_WORD - skip).min(len);
132-
133-
self.l1[idx] &=
134-
!((!0usize).unbounded_shl((Self::BITS_PER_WORD - rem) as u32) >> skip);
135-
136-
if len <= rem {
137-
return Some(self.begin + (start * super::PAGE_SIZE));
138-
}
139-
140-
len -= rem;
141-
idx += 1;
142-
}
143-
144-
// Mark all bits in the middle words as used.
145-
{
146-
let mid_cnt = len / Self::BITS_PER_WORD;
147-
148-
for i in 0..mid_cnt {
149-
self.l1[idx + i] = 0;
150-
}
151-
152-
idx += mid_cnt;
153-
}
154-
155-
// Mark the remaining bits in the last word as used.
156-
self.l1[idx] &= !((!0usize)
157-
.unbounded_shl((Self::BITS_PER_WORD - (len % Self::BITS_PER_WORD)) as u32));
158-
return Some(self.begin + (start * super::PAGE_SIZE));
159-
}
160-
}
161-
162-
None
69+
let idx = self.bitalloc.alloc(page_count)?;
70+
Some(self.begin + (idx * super::PAGE_SIZE))
16371
}
16472

16573
fn free(&mut self, addr: PhysAddr, page_count: usize) {
16674
if !addr.is_multiple_of(super::PAGE_SIZE) {
16775
panic!("Address must be page aligned");
16876
}
169-
170-
let mut idx =
171-
(addr.as_usize() - self.begin.as_usize()) / super::PAGE_SIZE / Self::BITS_PER_WORD;
172-
let mut bit_idx =
173-
((addr.as_usize() - self.begin.as_usize()) / super::PAGE_SIZE) % Self::BITS_PER_WORD;
174-
175-
// TODO: slow
176-
for _ in 0..page_count {
177-
self.l1[idx] |= 1 << (Self::BITS_PER_WORD - 1 - bit_idx);
178-
179-
bit_idx += 1;
180-
181-
if bit_idx == Self::BITS_PER_WORD {
182-
bit_idx = 0;
183-
idx += 1;
184-
}
185-
}
186-
}
187-
}
188-
189-
#[cfg(test)]
190-
mod tests {
191-
use super::*;
192-
193-
#[test]
194-
fn last_bit_underflow() {
195-
// Only the last page in word 0 is free
196-
let mut allocator = Allocator::<1>::new(PhysAddr::new(0)).unwrap();
197-
allocator.l1[0] = 1;
198-
199-
let result = super::super::Allocator::alloc(&mut allocator, 1);
200-
201-
assert!(result.is_some());
202-
}
203-
204-
#[test]
205-
fn test_random_pattern() {
206-
const ITARATIONS: usize = 1000;
207-
208-
for _ in 0..ITARATIONS {
209-
const N: usize = 1024;
210-
const BITS: usize = Allocator::<N>::BITS_PER_WORD;
211-
const ALLOC_SIZE: usize = 100;
212-
213-
let mut allocator = Allocator::<N>::new(PhysAddr::new(0x0)).unwrap();
214-
215-
// Generate a random bit pattern.
216-
for i in 0..N {
217-
let is_zero = rand::random::<bool>();
218-
219-
if is_zero {
220-
allocator.l1[i / BITS] &= !(1 << ((BITS - 1) - (i % BITS)));
221-
}
222-
}
223-
224-
// Place a run of ALLOC_SIZE contiguous bits set to 1 at a random position.
225-
let start = rand::random::<usize>() % (N - ALLOC_SIZE);
226-
for i in start..(start + ALLOC_SIZE) {
227-
allocator.l1[i / BITS] |= 1 << ((BITS - 1) - (i % BITS));
228-
}
229-
230-
let pre = allocator.l1.clone();
231-
232-
let addr = super::super::Allocator::alloc(&mut allocator, ALLOC_SIZE).unwrap();
233-
let idx = addr.as_usize() / super::super::PAGE_SIZE;
234-
235-
// Check that the bits in returned addresses is all ones in pre.
236-
for i in 0..ALLOC_SIZE {
237-
let bit = (pre[(idx + i) / BITS] >> ((BITS - 1) - ((idx + i) % BITS))) & 1;
238-
assert_eq!(bit, 1, "Bit at index {} is not set", idx + i);
239-
}
240-
241-
// Check that the bits in returned addresses is all zeros in allocator.l1.
242-
for i in 0..ALLOC_SIZE {
243-
let bit = (allocator.l1[(idx + i) / BITS] >> ((BITS - 1) - ((idx + i) % BITS))) & 1;
244-
assert_eq!(bit, 0, "Bit at index {} is not cleared", idx + i);
245-
}
246-
}
77+
let idx = addr.diff(self.begin) / super::PAGE_SIZE;
78+
self.bitalloc.free(idx, page_count);
24779
}
24880
}

0 commit comments

Comments (0)