//! SPI host (master) driver for the lowRISC/OpenTitan SPI_HOST peripheral.

use core::cell::Cell;
use core::cmp;
use kernel::hil;
use kernel::hil::spi::SpiMaster;
use kernel::hil::spi::{ClockPhase, ClockPolarity};
use kernel::utilities::cells::{MapCell, OptionalCell};
use kernel::utilities::leasable_buffer::SubSliceMut;
use kernel::utilities::registers::interfaces::{ReadWriteable, Readable, Writeable};
use kernel::utilities::registers::{
    register_bitfields, register_structs, ReadOnly, ReadWrite, WriteOnly,
};
use kernel::utilities::StaticRef;
use kernel::ErrorCode;

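/// Progress of an in-flight transfer as seen by the interrupt handler: either
/// the transfer has fully completed or more FIFO work remains.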
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SpiHostStatus {
    SpiTransferCmplt,
    SpiTransferInprog,
}

register_structs! {
    pub SpiHostRegisters {
        (0x000 => intr_state: ReadWrite<u32, intr::Register>),
        (0x004 => intr_enable: ReadWrite<u32, intr::Register>),
        (0x008 => intr_test: WriteOnly<u32, intr::Register>),
        (0x00c => alert_test: WriteOnly<u32, alert_test::Register>),
        (0x010 => ctrl: ReadWrite<u32, ctrl::Register>),
        (0x014 => status: ReadOnly<u32, status::Register>),
        (0x018 => config_opts: ReadWrite<u32, conf_opts::Register>),
        (0x01c => csid: ReadWrite<u32, csid_ctrl::Register>),
        (0x020 => command: WriteOnly<u32, command::Register>),
        (0x024 => rx_data: ReadWrite<u32, rx_data::Register>),
        (0x028 => tx_data: WriteOnly<u32, tx_data::Register>),
        (0x02c => err_en: ReadWrite<u32, err_en::Register>),
        (0x030 => err_status: ReadWrite<u32, err_status::Register>),
        (0x034 => event_en: ReadWrite<u32, event_en::Register>),
        (0x038 => @END),
    }
}

register_bitfields![u32,
    intr [
        ERROR OFFSET(0) NUMBITS(1) [],
        SPI_EVENT OFFSET(1) NUMBITS(1) [],
    ],
    alert_test [
        FATAL_FAULT OFFSET(0) NUMBITS(1) [],
    ],
    ctrl [
        RX_WATERMARK OFFSET(0) NUMBITS(8) [],
        TX_WATERMARK OFFSET(8) NUMBITS(8) [],
        OUTPUT_EN OFFSET(29) NUMBITS(1) [],
        SW_RST OFFSET(30) NUMBITS(1) [],
        SPIEN OFFSET(31) NUMBITS(1) []
    ],
    status [
        TXQD OFFSET(0) NUMBITS(8) [],
        RXQD OFFSET(15) NUMBITS(8) [],
        CMDQD OFFSET(16) NUMBITS(1) [],
        RXWM OFFSET(20) NUMBITS(1) [],
        BYTEORDER OFFSET(22) NUMBITS(1) [],
        RXSTALL OFFSET(23) NUMBITS(1) [],
        RXEMPTY OFFSET(24) NUMBITS(1) [],
        RXFULL OFFSET(25) NUMBITS(1) [],
        TXWM OFFSET(26) NUMBITS(1) [],
        TXSTALL OFFSET(27) NUMBITS(1) [],
        TXEMPTY OFFSET(28) NUMBITS(1) [],
        TXFULL OFFSET(29) NUMBITS(1) [],
        ACTIVE OFFSET(30) NUMBITS(1) [],
        READY OFFSET(31) NUMBITS(1) [],
    ],
    conf_opts [
        CLKDIV_0 OFFSET(0) NUMBITS(16) [],
        CSNIDLE_0 OFFSET(16) NUMBITS(3) [],
        CSNTRAIL_0 OFFSET(20) NUMBITS(3) [],
        CSNLEAD_0 OFFSET(24) NUMBITS(3) [],
        FULLCYC_0 OFFSET(29) NUMBITS(1) [],
        CPHA_0 OFFSET(30) NUMBITS(1) [],
        CPOL_0 OFFSET(31) NUMBITS(1) [],
    ],
    csid_ctrl [
        CSID OFFSET(0) NUMBITS(32) [],
    ],
    command [
        LEN OFFSET(0) NUMBITS(8) [],
        CSAAT OFFSET(9) NUMBITS(1) [],
        SPEED OFFSET(10) NUMBITS(2) [],
        DIRECTION OFFSET(12) NUMBITS(2) [],
    ],
    rx_data [
        DATA OFFSET(0) NUMBITS(32) [],
    ],
    tx_data [
        DATA OFFSET(0) NUMBITS(32) [],
    ],
    err_en [
        CMDBUSY OFFSET(0) NUMBITS(1) [],
        OVERFLOW OFFSET(1) NUMBITS(1) [],
        UNDERFLOW OFFSET(2) NUMBITS(1) [],
        CMDINVAL OFFSET(3) NUMBITS(1) [],
        CSIDINVAL OFFSET(4) NUMBITS(1) [],
    ],
    err_status [
        CMDBUSY OFFSET(0) NUMBITS(1) [],
        OVERFLOW OFFSET(1) NUMBITS(1) [],
        UNDERFLOW OFFSET(2) NUMBITS(1) [],
        CMDINVAL OFFSET(3) NUMBITS(1) [],
        CSIDINVAL OFFSET(4) NUMBITS(1) [],
        ACCESSINVAL OFFSET(5) NUMBITS(1) [],
    ],
    event_en [
        RXFULL OFFSET(0) NUMBITS(1) [],
        TXEMPTY OFFSET(1) NUMBITS(1) [],
        RXWM OFFSET(2) NUMBITS(1) [],
        TXWM OFFSET(3) NUMBITS(1) [],
        READY OFFSET(4) NUMBITS(1) [],
        IDLE OFFSET(5) NUMBITS(1) [],
    ],
];

pub struct SpiHost<'a> {
    registers: StaticRef<SpiHostRegisters>,
    client: OptionalCell<&'a dyn hil::spi::SpiMasterClient>,
    busy: Cell<bool>,
    cpu_clk: u32,
    tsclk: Cell<u32>,
    tx_buf: MapCell<SubSliceMut<'static, u8>>,
    rx_buf: MapCell<SubSliceMut<'static, u8>>,
    tx_len: Cell<usize>,
    rx_len: Cell<usize>,
    tx_offset: Cell<usize>,
    rx_offset: Cell<usize>,
}

// COMMAND.DIRECTION value selecting a full-duplex (bidirectional) segment.
const SPI_HOST_CMD_BIDIRECTIONAL: u32 = 3;
// COMMAND.SPEED value selecting standard (single-lane) SPI.
const SPI_HOST_CMD_STANDARD_SPI: u32 = 0;

impl SpiHost<'_> {
    pub fn new(base: StaticRef<SpiHostRegisters>, cpu_clk: u32) -> Self {
        SpiHost {
            registers: base,
            client: OptionalCell::empty(),
            busy: Cell::new(false),
            cpu_clk,
            tsclk: Cell::new(0),
            tx_buf: MapCell::empty(),
            rx_buf: MapCell::empty(),
            tx_len: Cell::new(0),
            rx_len: Cell::new(0),
            tx_offset: Cell::new(0),
            rx_offset: Cell::new(0),
        }
    }
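
    // Example construction (a sketch, not taken from this file): the base
    // address and peripheral clock frequency are placeholders that a board
    // crate would supply for its chip.
    //
    //     const SPI_HOST0_BASE: StaticRef<SpiHostRegisters> =
    //         unsafe { StaticRef::new(0x4030_0000 as *const SpiHostRegisters) };
    //     let spi_host = SpiHost::new(SPI_HOST0_BASE, 24_000_000);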

    pub fn handle_interrupt(&self) {
        let regs = self.registers;
        let irq = regs.intr_state.extract();
        self.disable_interrupts();

        if irq.is_set(intr::ERROR) {
            // A command error was raised: clear the error, reset the IP and the
            // driver state, and return the buffers to the client with a failure.
            self.clear_err_interrupt();
            self.reset_spi_ip();
            self.reset_internal_state();
            self.client.map(|client| match self.tx_buf.take() {
                None => (),
                Some(tx_buf) => {
                    client.read_write_done(tx_buf, self.rx_buf.take(), Err(ErrorCode::FAIL))
                }
            });
            return;
        }

        if irq.is_set(intr::SPI_EVENT) {
            let status = regs.status.extract();
            self.clear_event_interrupt();

            // TXEMPTY while a transfer is in flight: drain the RX FIFO and
            // either finish the transfer or queue the next chunk of TX data.
            if status.is_set(status::TXEMPTY) && self.is_busy() {
                match self.continue_transfer() {
                    Ok(SpiHostStatus::SpiTransferCmplt) => {
                        // Transfer complete: return the buffers to the client.
                        self.client.map(|client| match self.tx_buf.take() {
                            None => (),
                            Some(tx_buf) => client.read_write_done(
                                tx_buf,
                                self.rx_buf.take(),
                                Ok(self.tx_len.get()),
                            ),
                        });

                        self.disable_tx_interrupt();
                        self.reset_internal_state();
                    }
                    Ok(SpiHostStatus::SpiTransferInprog) => {}
                    Err(err) => {
                        // Transfer failed part way through: clean up and report
                        // the error to the client.
                        self.clear_err_interrupt();
                        self.reset_spi_ip();
                        self.reset_internal_state();
                        self.client.map(|client| match self.tx_buf.take() {
                            None => (),
                            Some(tx_buf) => {
                                client.read_write_done(tx_buf, self.rx_buf.take(), Err(err))
                            }
                        });
                    }
                }
            } else {
                // Event was not for an active transfer; re-arm the interrupts.
                self.enable_interrupts();
            }
        }
    }

    /// Drain freshly received words from the RX FIFO into `rx_buf`, then
    /// either report completion or queue the next chunk of TX data.
    fn continue_transfer(&self) -> Result<SpiHostStatus, ErrorCode> {
        let rc = self.rx_buf.take().map_or(
            Err(ErrorCode::FAIL),
            |mut rx_buf| -> Result<SpiHostStatus, ErrorCode> {
                let regs = self.registers;
                let mut val32: u32;
                let mut val8: u8;
                let mut shift_mask;
                // Full-duplex transfer: the bytes outstanding in the RX FIFO
                // equal the bytes transmitted since the last drain.
                let rx_len = self.tx_offset.get() - self.rx_offset.get();
                let read_cycles = self.div_up(rx_len, 4);

                for _n in 0..read_cycles {
                    val32 = regs.rx_data.read(rx_data::DATA);
                    shift_mask = 0xFF;
                    // Unpack the 32-bit FIFO word little-endian, one byte at a time.
                    for i in 0..4 {
                        if self.rx_offset.get() >= self.rx_len.get() {
                            break;
                        }
                        val8 = ((val32 & shift_mask) >> (i * 8)) as u8;
                        if let Some(ptr) = rx_buf.as_mut_slice().get_mut(self.rx_offset.get()) {
                            *ptr = val8;
                        } else {
                            break;
                        }
                        self.rx_offset.set(self.rx_offset.get() + 1);
                        shift_mask <<= 8;
                    }
                }
                self.rx_buf.replace(rx_buf);
                if self.tx_offset.get() == self.tx_len.get() {
                    // Everything queued for TX has been sent and drained.
                    Ok(SpiHostStatus::SpiTransferCmplt)
                } else {
                    // More data to send; refill the TX FIFO.
                    self.spi_transfer_progress()
                }
            },
        );

        rc
    }

    /// Queue the next chunk of `tx_buf` into the TX FIFO and start the next
    /// command segment. Returns `SpiTransferCmplt` once all of `tx_buf` has
    /// been queued, or `SpiTransferInprog` if more remains.
    fn spi_transfer_progress(&self) -> Result<SpiHostStatus, ErrorCode> {
        let mut transfer_complete = false;
        if self
            .tx_buf
            .take()
            .map(|tx_buf| -> Result<(), ErrorCode> {
                let regs = self.registers;
                let mut t_byte: u32;
                let mut tx_slice: [u8; 4];

                if regs.status.read(status::TXQD) != 0 || regs.status.read(status::ACTIVE) != 0 {
                    // The TX FIFO has not drained or a command is still running.
                    self.tx_buf.replace(tx_buf);
                    return Err(ErrorCode::BUSY);
                }

                // Fill the TX FIFO one 32-bit word (up to 4 bytes) at a time.
                while !regs.status.is_set(status::TXFULL) && regs.status.read(status::TXQD) < 64 {
                    tx_slice = [0, 0, 0, 0];
                    for elem in tx_slice.iter_mut() {
                        if self.tx_offset.get() >= self.tx_len.get() {
                            break;
                        }
                        if let Some(val) = tx_buf.as_slice().get(self.tx_offset.get()) {
                            *elem = *val;
                            self.tx_offset.set(self.tx_offset.get() + 1);
                        } else {
                            break;
                        }
                    }
                    t_byte = u32::from_le_bytes(tx_slice);
                    regs.tx_data.write(tx_data::DATA.val(t_byte));

                    if self.tx_offset.get() >= self.tx_len.get() {
                        // All of tx_buf has been queued.
                        transfer_complete = true;
                        break;
                    }
                }

                self.tx_buf.replace(tx_buf);

                self.start_transceive();
                Ok(())
            })
            .transpose()
            .is_err()
        {
            return Err(ErrorCode::BUSY);
        }

        if transfer_complete {
            Ok(SpiHostStatus::SpiTransferCmplt)
        } else {
            Ok(SpiHostStatus::SpiTransferInprog)
        }
    }
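
    // Note on byte order: TX FIFO words are packed with `u32::from_le_bytes`
    // (see `spi_transfer_progress` above), so, for example, queuing the bytes
    // [0xAA, 0xBB, 0xCC, 0xDD] writes the FIFO word 0xDDCCBBAA. The RX path in
    // `continue_transfer` unpacks words in the same little-endian order.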

    fn start_transceive(&self) {
        let regs = self.registers;
        // STATUS.TXQD counts queued 32-bit words; convert to bytes.
        let txfifo_num_bytes = regs.status.read(status::TXQD) * 4;

        // COMMAND.LEN is an 8-bit field, so clamp the segment length accordingly.
        let num_transfer_bytes: u32 = if txfifo_num_bytes > u8::MAX as u32 {
            u8::MAX as u32
        } else {
            txfifo_num_bytes
        };

        self.enable_interrupts();
        self.enable_tx_interrupt();

        if self.tx_offset.get() >= self.tx_len.get() {
            // Final segment: deassert chip select when this command completes.
            regs.command.write(
                command::LEN.val(num_transfer_bytes)
                    + command::DIRECTION.val(SPI_HOST_CMD_BIDIRECTIONAL)
                    + command::CSAAT::CLEAR
                    + command::SPEED.val(SPI_HOST_CMD_STANDARD_SPI),
            );
        } else {
            // More data follows: keep chip select asserted between segments (CSAAT).
            regs.command.write(
                command::LEN.val(num_transfer_bytes)
                    + command::DIRECTION.val(SPI_HOST_CMD_BIDIRECTIONAL)
                    + command::CSAAT::SET
                    + command::SPEED.val(SPI_HOST_CMD_STANDARD_SPI),
            );
        }
    }

    fn reset_internal_state(&self) {
        self.clear_spi_busy();
        self.tx_len.set(0);
        self.rx_len.set(0);
        self.tx_offset.set(0);
        self.rx_offset.set(0);

        debug_assert!(self.tx_buf.is_none());
        debug_assert!(self.rx_buf.is_none());
    }

    #[allow(dead_code)]
    fn enable_spi_host(&self) {
        let regs = self.registers;
        regs.ctrl.modify(ctrl::SPIEN::SET + ctrl::OUTPUT_EN::SET);
    }

    fn reset_spi_ip(&self) {
        let regs = self.registers;
        regs.ctrl.modify(ctrl::SW_RST::SET);

        // Wait for the core to go idle and the FIFOs to drain before releasing reset.
        while regs.status.is_set(status::ACTIVE) {}
        while regs.status.read(status::TXQD) != 0 && regs.status.read(status::RXQD) != 0 {}
        regs.ctrl.modify(ctrl::SW_RST::CLEAR);
    }

    fn enable_interrupts(&self) {
        // Clear any pending interrupts (write-1-to-clear), then enable both sources.
        self.registers
            .intr_state
            .write(intr::ERROR::SET + intr::SPI_EVENT::SET);
        self.registers
            .intr_enable
            .modify(intr::ERROR::SET + intr::SPI_EVENT::SET);
    }

    fn disable_interrupts(&self) {
        let regs = self.registers;
        regs.intr_enable
            .write(intr::ERROR::CLEAR + intr::SPI_EVENT::CLEAR);
    }

    fn clear_err_interrupt(&self) {
        let regs = self.registers;
        // Error status bits are write-1-to-clear.
        regs.err_status.modify(err_status::CMDBUSY::SET);
        regs.err_status.modify(err_status::OVERFLOW::SET);
        regs.err_status.modify(err_status::UNDERFLOW::SET);
        regs.err_status.modify(err_status::CMDINVAL::SET);
        regs.err_status.modify(err_status::CSIDINVAL::SET);
        regs.err_status.modify(err_status::ACCESSINVAL::SET);
        regs.intr_state.modify(intr::ERROR::SET);
    }

    fn clear_event_interrupt(&self) {
        let regs = self.registers;
        regs.intr_state.modify(intr::SPI_EVENT::SET);
    }

    #[allow(dead_code)]
    fn test_error_interrupt(&self) {
        let regs = self.registers;
        regs.intr_test.write(intr::ERROR::SET);
    }

    #[allow(dead_code)]
    fn clear_tests(&self) {
        let regs = self.registers;
        regs.intr_test
            .write(intr::ERROR::CLEAR + intr::SPI_EVENT::CLEAR);
    }

    #[allow(dead_code)]
    fn test_event_interrupt(&self) {
        let regs = self.registers;
        regs.intr_test.write(intr::SPI_EVENT::SET);
    }

    #[allow(dead_code)]
    fn event_enable(&self) {
        let regs = self.registers;
        regs.event_en.write(event_en::TXEMPTY::SET);
    }

    fn disable_tx_interrupt(&self) {
        let regs = self.registers;
        regs.event_en.modify(event_en::TXEMPTY::CLEAR);
    }

    fn enable_tx_interrupt(&self) {
        let regs = self.registers;
        regs.event_en.modify(event_en::TXEMPTY::SET);
    }

    #[allow(dead_code)]
    fn err_enable(&self) {
        let regs = self.registers;
        regs.err_en.modify(
            err_en::CMDBUSY::SET
                + err_en::CMDINVAL::SET
                + err_en::CSIDINVAL::SET
                + err_en::OVERFLOW::SET
                + err_en::UNDERFLOW::SET,
        );
    }

    fn set_spi_busy(&self) {
        self.busy.set(true);
    }

    fn clear_spi_busy(&self) {
        self.busy.set(false);
    }

    fn div_up(&self, a: usize, b: usize) -> usize {
        a.div_ceil(b)
    }

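    /// Compute the CLKDIV scaler for a requested SCK `rate`, where
    /// SCK = cpu_clk / (2 * (CLKDIV + 1)).
    ///
    /// Worked example (a sketch): with `cpu_clk` = 48 MHz and `rate` = 5 MHz,
    /// 48_000_000 / (2 * 5_000_000) - 1 = 3; since 48 MHz is not an exact
    /// multiple of 10 MHz the scaler is bumped to 4, giving
    /// SCK = 48 MHz / (2 * 5) = 4.8 MHz, at or below the requested rate.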
    fn calculate_tsck_scaler(&self, rate: u32) -> Result<u16, ErrorCode> {
        if rate > self.cpu_clk / 2 {
            return Err(ErrorCode::NOSUPPORT);
        }
        let mut scaler: u32 = (self.cpu_clk / (2 * rate)) - 1;

        if !self.cpu_clk.is_multiple_of(2 * rate) && scaler != 0xFF {
            scaler += 1;
        }
        Ok(scaler as u16)
    }
}

#[derive(Copy, Clone)]
pub struct CS(pub u32);

impl hil::spi::cs::IntoChipSelect<CS, hil::spi::cs::ActiveLow> for CS {
    fn into_cs(self) -> CS {
        self
    }
}

impl<'a> hil::spi::SpiMaster<'a> for SpiHost<'a> {
    type ChipSelect = CS;

    fn init(&self) -> Result<(), ErrorCode> {
        let regs = self.registers;
        self.event_enable();
        self.err_enable();

        self.enable_interrupts();

        self.enable_spi_host();

        regs.tx_data.write(tx_data::DATA.val(0x00));
        assert_eq!(regs.status.read(status::TXQD), 0);
        Ok(())
    }

    fn set_client(&self, client: &'a dyn hil::spi::SpiMasterClient) {
        self.client.set(client);
    }

    fn is_busy(&self) -> bool {
        self.busy.get()
    }

    fn read_write_bytes(
        &self,
        tx_buf: SubSliceMut<'static, u8>,
        rx_buf: Option<SubSliceMut<'static, u8>>,
    ) -> Result<
        (),
        (
            ErrorCode,
            SubSliceMut<'static, u8>,
            Option<SubSliceMut<'static, u8>>,
        ),
    > {
        debug_assert!(!self.busy.get());
        debug_assert!(self.tx_buf.is_none());
        debug_assert!(self.rx_buf.is_none());
        let regs = self.registers;

        if self.is_busy() || regs.status.is_set(status::TXFULL) {
            return Err((ErrorCode::BUSY, tx_buf, rx_buf));
        }

        // Full-duplex only: an RX buffer is required.
        if rx_buf.is_none() {
            return Err((ErrorCode::NOMEM, tx_buf, rx_buf));
        }

        self.tx_len.set(tx_buf.len());

        let mut t_byte: u32;
        let mut tx_slice: [u8; 4];
        self.set_spi_busy();

        // Prefill the TX FIFO (up to 64 words) before issuing the command.
        while !regs.status.is_set(status::TXFULL) && regs.status.read(status::TXQD) < 64 {
            tx_slice = [0, 0, 0, 0];
            for elem in tx_slice.iter_mut() {
                if self.tx_offset.get() >= self.tx_len.get() {
                    break;
                }
                *elem = tx_buf[self.tx_offset.get()];
                self.tx_offset.set(self.tx_offset.get() + 1);
            }
            t_byte = u32::from_le_bytes(tx_slice);
            regs.tx_data.write(tx_data::DATA.val(t_byte));

            if self.tx_offset.get() >= self.tx_len.get() {
                break;
            }
        }

        self.tx_buf.replace(tx_buf);

        // Receive at most as many bytes as will be transmitted.
        rx_buf.map(|rx_buf_t| {
            self.rx_len.set(cmp::min(self.tx_len.get(), rx_buf_t.len()));
            self.rx_buf.replace(rx_buf_t);
        });

        self.start_transceive();

        Ok(())
    }
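
    // Example call from a client capsule (a sketch; `spi_host`, `tx`, and `rx`
    // are placeholder bindings, with `tx`/`rx` being `&'static mut [u8]`):
    //
    //     let _ = spi_host.read_write_bytes(SubSliceMut::new(tx), Some(SubSliceMut::new(rx)));
    //
    // Completion is signaled through `SpiMasterClient::read_write_done` once
    // the final SPI_EVENT interrupt is handled.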
    fn write_byte(&self, _val: u8) -> Result<(), ErrorCode> {
        // Single-byte operations are not supported; use `read_write_bytes()`.
        Err(ErrorCode::FAIL)
    }

    fn read_byte(&self) -> Result<u8, ErrorCode> {
        Err(ErrorCode::FAIL)
    }

    fn read_write_byte(&self, _val: u8) -> Result<u8, ErrorCode> {
        Err(ErrorCode::FAIL)
    }

    fn specify_chip_select(&self, cs: Self::ChipSelect) -> Result<(), ErrorCode> {
        let regs = self.registers;

        regs.csid.write(csid_ctrl::CSID.val(cs.0));

        Ok(())
    }

    fn set_rate(&self, rate: u32) -> Result<u32, ErrorCode> {
        let regs = self.registers;

        match self.calculate_tsck_scaler(rate) {
            Ok(scaler) => {
                regs.config_opts
                    .modify(conf_opts::CLKDIV_0.val(scaler as u32));
                self.tsclk.set(rate);
                Ok(rate)
            }
            Err(e) => Err(e),
        }
    }
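
    // Note: `set_rate` caches and returns the requested rate; the divider
    // computed by `calculate_tsck_scaler` may place the actual SCK frequency
    // slightly below the requested value.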
    fn get_rate(&self) -> u32 {
        self.tsclk.get()
    }

    fn set_polarity(&self, polarity: ClockPolarity) -> Result<(), ErrorCode> {
        let regs = self.registers;
        match polarity {
            ClockPolarity::IdleLow => regs.config_opts.modify(conf_opts::CPOL_0::CLEAR),
            ClockPolarity::IdleHigh => regs.config_opts.modify(conf_opts::CPOL_0::SET),
        }
        Ok(())
    }

    fn get_polarity(&self) -> ClockPolarity {
        let regs = self.registers;

        match regs.config_opts.read(conf_opts::CPOL_0) {
            0 => ClockPolarity::IdleLow,
            1 => ClockPolarity::IdleHigh,
            _ => unreachable!(),
        }
    }

    fn set_phase(&self, phase: ClockPhase) -> Result<(), ErrorCode> {
        let regs = self.registers;
        match phase {
            ClockPhase::SampleLeading => regs.config_opts.modify(conf_opts::CPHA_0::CLEAR),
            ClockPhase::SampleTrailing => regs.config_opts.modify(conf_opts::CPHA_0::SET),
        }
        Ok(())
    }

    fn get_phase(&self) -> ClockPhase {
        let regs = self.registers;

        match regs.config_opts.read(conf_opts::CPHA_0) {
            1 => ClockPhase::SampleTrailing,
            0 => ClockPhase::SampleLeading,
            _ => unreachable!(),
        }
    }

    fn hold_low(&self) {
        unimplemented!("spi_host: does not support hold low");
    }

    fn release_low(&self) {
        unimplemented!("spi_host: does not support release low");
    }
}