1use core::cell::Cell;
8use kernel::ErrorCode;
9
10use kernel::deferred_call::{DeferredCall, DeferredCallClient};
11use kernel::hil;
12use kernel::hil::uart;
13use kernel::utilities::cells::OptionalCell;
14use kernel::utilities::cells::TakeCell;
15use kernel::utilities::registers::interfaces::{ReadWriteable, Readable, Writeable};
16use kernel::utilities::StaticRef;
17
18use crate::registers::uart_regs::UartRegisters;
19use crate::registers::uart_regs::{CTRL, FIFO_CTRL, INTR, STATUS, TIMEOUT_CTRL, WDATA};
20
/// Driver state for one OpenTitan/lowRISC UART peripheral instance.
pub struct Uart<'a> {
    // Memory-mapped register block for this UART instance.
    registers: StaticRef<UartRegisters>,
    // Frequency (Hz) of the clock driving the UART; used for baud setup.
    clock_frequency: u32,
    // Client notified when an asynchronous transmit completes.
    tx_client: OptionalCell<&'a dyn hil::uart::TransmitClient>,
    // Client notified when an asynchronous receive completes.
    rx_client: OptionalCell<&'a dyn hil::uart::ReceiveClient>,
    // Used to drain RX data that is already queued when a receive starts,
    // since the edge-triggered watermark interrupt would not fire for it.
    rx_deferred_call: DeferredCall,

    // In-flight transmit buffer, requested length, and next byte to send.
    tx_buffer: TakeCell<'static, [u8]>,
    tx_len: Cell<usize>,
    tx_index: Cell<usize>,

    // In-flight receive buffer, requested length, and next byte to fill.
    rx_buffer: TakeCell<'static, [u8]>,
    rx_len: Cell<usize>,
    rx_index: Cell<usize>,
    // Inter-byte timeout for receive_automatic(); 0 means "no timeout"
    // (plain receive_buffer() semantics).
    rx_timeout: Cell<u8>,
}
37
/// Subset of UART configuration parameters a board can specify.
#[derive(Copy, Clone)]
pub struct UartParams {
    // Requested baud rate in bits per second.
    pub baud_rate: u32,
    // Parity mode (even/odd/none) as defined by the HIL.
    pub parity: uart::Parity,
}
43
44fn div_round_bounded(a: u64, b: u64) -> Result<u64, ErrorCode> {
46 let q = a / b;
47
48 if 39 * a <= 40 * b * q {
49 Ok(q)
50 } else if 40 * b * (q + 1) <= 41 * a {
51 Ok(q + 1)
52 } else {
53 Err(ErrorCode::INVAL)
54 }
55}
56
57impl<'a> Uart<'a> {
58 pub fn new(base: StaticRef<UartRegisters>, clock_frequency: u32) -> Uart<'a> {
59 Uart {
60 registers: base,
61 clock_frequency,
62 tx_client: OptionalCell::empty(),
63 rx_client: OptionalCell::empty(),
64 rx_deferred_call: DeferredCall::new(),
65 tx_buffer: TakeCell::empty(),
66 tx_len: Cell::new(0),
67 tx_index: Cell::new(0),
68 rx_buffer: TakeCell::empty(),
69 rx_len: Cell::new(0),
70 rx_index: Cell::new(0),
71 rx_timeout: Cell::new(0),
72 }
73 }
74
75 fn set_baud_rate(&self, baud_rate: u32) -> Result<(), ErrorCode> {
76 const NCO_BITS: u32 = u32::count_ones(CTRL::NCO.mask);
77
78 let regs = self.registers;
79 let baud_adj = (baud_rate as u64) << (NCO_BITS + 4);
80 let freq_clk = self.clock_frequency as u64;
81 let uart_ctrl_nco = div_round_bounded(baud_adj, freq_clk)?;
82
83 regs.ctrl
84 .write(CTRL::NCO.val((uart_ctrl_nco & 0xffff) as u32));
85 regs.ctrl.modify(CTRL::TX::SET + CTRL::RX::SET);
86
87 regs.fifo_ctrl
88 .write(FIFO_CTRL::RXRST::SET + FIFO_CTRL::TXRST::SET);
89
90 Ok(())
91 }
92
93 fn enable_tx_interrupt(&self) {
94 let regs = self.registers;
95
96 regs.intr_enable.modify(INTR::TX_EMPTY::SET);
97 }
98
99 fn disable_tx_interrupt(&self) {
100 let regs = self.registers;
101
102 regs.intr_enable.modify(INTR::TX_EMPTY::CLEAR);
103 regs.intr_state.write(INTR::TX_EMPTY::SET);
105 }
106
107 fn enable_rx_interrupt(&self) {
108 let regs = self.registers;
109
110 regs.intr_enable.modify(INTR::RX_WATERMARK::SET);
112 regs.fifo_ctrl.write(FIFO_CTRL::RXILVL.val(0_u32));
113
114 if !regs.status.is_set(STATUS::RXEMPTY) {
117 self.rx_deferred_call.set();
118 self.disable_rx_interrupt();
119 }
120 }
121
122 fn disable_rx_interrupt(&self) {
123 let regs = self.registers;
124
125 regs.intr_enable.modify(INTR::RX_WATERMARK::CLEAR);
127
128 regs.intr_state.write(INTR::RX_WATERMARK::SET);
130 }
131
132 fn enable_rx_timeout(&self, interbyte_timeout: u8) {
133 let regs = self.registers;
134
135 regs.timeout_ctrl
137 .write(TIMEOUT_CTRL::VAL.val(interbyte_timeout as u32));
138
139 regs.timeout_ctrl.write(TIMEOUT_CTRL::EN::SET);
141
142 regs.intr_enable.write(INTR::RX_TIMEOUT::SET);
144 }
145
146 fn disable_rx_timeout(&self) {
147 let regs = self.registers;
148
149 regs.timeout_ctrl.modify(TIMEOUT_CTRL::EN::CLEAR);
151
152 regs.intr_enable.modify(INTR::RX_TIMEOUT::CLEAR);
154
155 regs.intr_state.write(INTR::RX_TIMEOUT::SET);
157 }
158
159 fn tx_progress(&self) {
160 let regs = self.registers;
161 let idx = self.tx_index.get();
162 let len = self.tx_len.get();
163
164 if idx < len {
165 self.enable_tx_interrupt();
170
171 self.tx_buffer.map(|tx_buf| {
174 let tx_len = len - idx;
175
176 for i in 0..tx_len {
177 if regs.status.is_set(STATUS::TXFULL) {
178 break;
179 }
180 let tx_idx = idx + i;
181 regs.wdata.write(WDATA::WDATA.val(tx_buf[tx_idx] as u32));
182 self.tx_index.set(tx_idx + 1)
183 }
184 });
185 }
186 }
187
188 fn consume_rx(&self) {
189 let regs = self.registers;
190
191 self.rx_client.map(|client| {
192 self.rx_buffer.take().map(|rx_buf| {
193 let mut len = 0;
194 let mut return_code = Ok(());
195
196 for i in self.rx_index.get()..self.rx_len.get() {
197 if regs.status.is_set(STATUS::RXEMPTY) {
198 let rx_timeout = self.rx_timeout.get();
203 if rx_timeout > 0 {
204 self.rx_index.set(i);
205 self.enable_rx_timeout(rx_timeout);
206 self.enable_rx_interrupt();
207 return;
208 } else {
209 return_code = Err(ErrorCode::SIZE);
210 break;
211 }
212 }
213
214 rx_buf[i] = regs.rdata.get() as u8;
215 len = i + 1;
216 }
217
218 client.received_buffer(rx_buf, len, return_code, uart::Error::None);
219 });
220 });
221 }
222
223 pub fn handle_interrupt(&self) {
224 let regs = self.registers;
225 let intrs = regs.intr_state.extract();
226
227 if intrs.is_set(INTR::TX_EMPTY) {
228 self.disable_tx_interrupt();
229
230 if self.tx_index.get() == self.tx_len.get() {
231 self.tx_client.map(|client| {
234 self.tx_buffer.take().map(|tx_buf| {
235 client.transmitted_buffer(tx_buf, self.tx_len.get(), Ok(()));
236 });
237 });
238 } else {
239 self.tx_progress();
241 }
242 } else if intrs.is_set(INTR::RX_WATERMARK) {
243 self.disable_rx_interrupt();
244 self.consume_rx();
245 } else if intrs.is_set(INTR::RX_TIMEOUT) {
246 self.disable_rx_interrupt();
247 self.disable_rx_timeout();
248
249 self.rx_client.map(|client| {
251 self.rx_buffer.take().map(|rx_buf| {
252 client.received_buffer(
253 rx_buf,
254 self.rx_index.get() + 1,
255 Err(kernel::ErrorCode::SIZE),
256 uart::Error::None,
257 );
258 })
259 });
260 } else if intrs.is_set(INTR::TX_WATERMARK) {
261 } else if intrs.is_set(INTR::RX_OVERFLOW) {
263 self.disable_rx_interrupt();
264 self.rx_client.map(|client| {
265 self.rx_buffer.take().map(|rx_buf| {
266 client.received_buffer(
267 rx_buf,
268 self.rx_index.get(),
269 Err(kernel::ErrorCode::FAIL),
270 uart::Error::OverrunError,
271 );
272 });
273 });
274 } else if intrs.is_set(INTR::RX_FRAME_ERR) {
275 self.disable_rx_interrupt();
276 self.rx_client.map(|client| {
277 self.rx_buffer.take().map(|rx_buf| {
278 client.received_buffer(
279 rx_buf,
280 self.rx_index.get(),
281 Err(kernel::ErrorCode::FAIL),
282 uart::Error::FramingError,
283 );
284 });
285 });
286 } else if intrs.is_set(INTR::RX_BREAK_ERR) {
287 self.disable_rx_interrupt();
288 self.rx_client.map(|client| {
289 self.rx_buffer.take().map(|rx_buf| {
290 client.received_buffer(
291 rx_buf,
292 self.rx_index.get(),
293 Err(kernel::ErrorCode::FAIL),
294 uart::Error::BreakError,
295 );
296 });
297 });
298 } else if intrs.is_set(INTR::RX_PARITY_ERR) {
299 self.disable_rx_interrupt();
300 self.rx_client.map(|client| {
301 self.rx_buffer.take().map(|rx_buf| {
302 client.received_buffer(
303 rx_buf,
304 self.rx_index.get(),
305 Err(kernel::ErrorCode::FAIL),
306 uart::Error::ParityError,
307 );
308 });
309 });
310 }
311 }
312
313 pub fn transmit_sync(&self, bytes: &[u8]) {
314 let regs = self.registers;
315 for b in bytes.iter() {
316 while regs.status.is_set(STATUS::TXFULL) {}
317 regs.wdata.write(WDATA::WDATA.val(*b as u32));
318 }
319 }
320}
321
impl hil::uart::Configure for Uart<'_> {
    /// Apply baud rate and parity, reset both FIFOs, and mask all
    /// interrupts. Callers re-enable interrupts via transmit/receive calls.
    ///
    /// NOTE(review): other `hil::uart::Parameters` fields (stop bits, width,
    /// flow control) are ignored here — confirm the hardware defaults match
    /// what boards expect.
    fn configure(&self, params: hil::uart::Parameters) -> Result<(), ErrorCode> {
        let regs = self.registers;
        // Propagates INVAL if the requested rate is not achievable.
        self.set_baud_rate(params.baud_rate)?;

        match params.parity {
            uart::Parity::Even => regs
                .ctrl
                .modify(CTRL::PARITY_EN::SET + CTRL::PARITY_ODD::CLEAR),
            uart::Parity::Odd => regs
                .ctrl
                .modify(CTRL::PARITY_EN::SET + CTRL::PARITY_ODD::SET),
            uart::Parity::None => regs
                .ctrl
                .modify(CTRL::PARITY_EN::CLEAR + CTRL::PARITY_ODD::CLEAR),
        }

        // Discard anything received/queued under the old configuration.
        regs.fifo_ctrl
            .write(FIFO_CTRL::RXRST::SET + FIFO_CTRL::TXRST::SET);

        // Start with every interrupt source disabled.
        regs.intr_enable.set(0_u32);

        Ok(())
    }
}
349
350impl<'a> hil::uart::Transmit<'a> for Uart<'a> {
351 fn set_transmit_client(&self, client: &'a dyn hil::uart::TransmitClient) {
352 self.tx_client.set(client);
353 }
354
355 fn transmit_buffer(
356 &self,
357 tx_data: &'static mut [u8],
358 tx_len: usize,
359 ) -> Result<(), (ErrorCode, &'static mut [u8])> {
360 if tx_len == 0 || tx_len > tx_data.len() {
361 Err((ErrorCode::SIZE, tx_data))
362 } else if self.tx_buffer.is_some() {
363 Err((ErrorCode::BUSY, tx_data))
364 } else {
365 self.tx_buffer.replace(tx_data);
367 self.tx_len.set(tx_len);
368 self.tx_index.set(0);
369
370 self.tx_progress();
371 Ok(())
372 }
373 }
374
375 fn transmit_abort(&self) -> Result<(), ErrorCode> {
376 Err(ErrorCode::FAIL)
377 }
378
379 fn transmit_word(&self, _word: u32) -> Result<(), ErrorCode> {
380 Err(ErrorCode::FAIL)
381 }
382}
383
384impl<'a> hil::uart::Receive<'a> for Uart<'a> {
386 fn set_receive_client(&self, client: &'a dyn hil::uart::ReceiveClient) {
387 self.rx_client.set(client);
388 }
389
390 fn receive_buffer(
391 &self,
392 rx_buffer: &'static mut [u8],
393 rx_len: usize,
394 ) -> Result<(), (ErrorCode, &'static mut [u8])> {
395 if rx_len == 0 || rx_len > rx_buffer.len() {
396 return Err((ErrorCode::SIZE, rx_buffer));
397 }
398
399 self.enable_rx_interrupt();
400
401 self.rx_buffer.replace(rx_buffer);
402 self.rx_len.set(rx_len);
403 self.rx_timeout.set(0);
404 self.rx_index.set(0);
405
406 Ok(())
407 }
408
409 fn receive_abort(&self) -> Result<(), ErrorCode> {
410 Err(ErrorCode::FAIL)
411 }
412
413 fn receive_word(&self) -> Result<(), ErrorCode> {
414 Err(ErrorCode::FAIL)
415 }
416}
417
418impl<'a> hil::uart::ReceiveAdvanced<'a> for Uart<'a> {
419 fn receive_automatic(
420 &self,
421 rx_buffer: &'static mut [u8],
422 rx_len: usize,
423 interbyte_timeout: u8,
424 ) -> Result<(), (ErrorCode, &'static mut [u8])> {
425 if rx_len == 0 || rx_len > rx_buffer.len() {
426 return Err((ErrorCode::SIZE, rx_buffer));
427 }
428
429 self.rx_buffer.replace(rx_buffer);
430 self.rx_len.set(rx_len);
431 self.rx_timeout.set(interbyte_timeout);
432 self.rx_index.set(0);
433
434 Ok(())
435 }
436}
437
impl DeferredCallClient for Uart<'_> {
    /// Deferred-call path: drain RX bytes that were already queued when a
    /// receive was started (see `enable_rx_interrupt`), outside the
    /// hardware-interrupt context.
    fn handle_deferred_call(&self) {
        self.consume_rx();
    }

    fn register(&'static self) {
        self.rx_deferred_call.register(self);
    }
}
447
#[cfg(test)]
mod tests {
    use super::div_round_bounded;
    use kernel::ErrorCode;

    /// Exercise div_round_bounded on exact divisions, round-down and
    /// round-up cases near the 2.5% tolerance, out-of-tolerance inputs, and
    /// the nominal 115200-baud NCO computation.
    #[test]
    fn test_bounded_division() {
        const TEST_VECTORS: [(u64, u64, Result<u64, ErrorCode>); 10] = [
            (100, 4, Ok(25)),
            (41, 40, Ok(1)),
            (83, 40, Err(ErrorCode::INVAL)),
            (105, 40, Err(ErrorCode::INVAL)),
            (120, 40, Ok(3)),
            (121, 40, Ok(3)),
            (158, 40, Ok(4)),
            (159, 40, Ok(4)),
            (10, 3, Err(ErrorCode::INVAL)),
            (120_795_955_200, 6_000_000, Ok(20132)),
        ];

        for &(dividend, divisor, expected) in TEST_VECTORS.iter() {
            assert_eq!(div_round_bounded(dividend, divisor), expected);
        }
    }
}