source: svn/newcon3bcm2_21bu/magnum/portinginterface/mmd/7552/bmmd.c @ 47

Last change on this file since 47 was 47, checked in by megakiss, 11 years ago

Changed OTC frequency to 459MHz

  • Property svn:executable set to *
File size: 56.6 KB
1/***************************************************************************
2 *     Copyright (c) 2003-2012, Broadcom Corporation
3 *     All Rights Reserved
4 *     Confidential Property of Broadcom Corporation
5 *
6 *  THIS SOFTWARE MAY ONLY BE USED SUBJECT TO AN EXECUTED SOFTWARE LICENSE
7 *  AGREEMENT  BETWEEN THE USER AND BROADCOM.  YOU HAVE NO RIGHT TO USE OR
8 *  EXPLOIT THIS MATERIAL EXCEPT SUBJECT TO THE TERMS OF SUCH AN AGREEMENT.
9 *
10 * $brcm_Workfile: bmmd.c $
11 * $brcm_Revision: Hydra_Software_Devel/35 $
12 * $brcm_Date: 3/30/12 11:56a $
13 *
14 * Module Description:
15 *
16 * Revision History:
17 *
18 * $brcm_Log: /magnum/portinginterface/mmd/7425/bmmd.c $
19 *
20 * Hydra_Software_Devel/35   3/30/12 11:56a jtna
21 * SW7425-2800: move BMMD_Context_P_Start() inside critical section
22 *
23 * Hydra_Software_Devel/34   2/2/12 12:12p jtna
24 * SW7346-663: fix compile for non-40nm platforms
25 *
26 * Hydra_Software_Devel/33   2/2/12 10:57a jtna
27 * SW7346-663: clear descriptor word 4 correctly when switching back to
28 * no-scrambling
29 *
30 * Hydra_Software_Devel/32   12/14/11 3:41p jtna
31 * SW7550-774: save BMMD_Settings in MMD handle
32 *
33 * Hydra_Software_Devel/31   11/29/11 11:27a jtna
34 * SWDTV-7838: adapt to new BCHP_FIELD_DATA macro, for SHARF-only
35 * platforms
36 *
37 * Hydra_Software_Devel/30   11/11/11 5:59p jtna
38 * SW7425-1709: disable/enable BINT callback on standby/resume
39 *
40 * Hydra_Software_Devel/29   11/9/11 10:57a jtna
41 * SW7425-1709: silence compiler warning for undef BCHP_PWR_RESOURCE_DMA
42 *
43 * Hydra_Software_Devel/28   11/8/11 5:55p jtna
44 * SW7425-1709: add standby/resume
45 *
46 * Hydra_Software_Devel/27   11/8/11 3:06p jtna
47 * SW7425-1079: move power management from nexus_dma to bmmd
48 *
49 * Hydra_Software_Devel/26   10/31/11 2:59p jtna
50 * SW7435-15: fix compiler warning
51 *
52 * Hydra_Software_Devel/24   10/28/11 11:13a jtna
53 * SW7435-15: add 7435 support
54 *
55 * Hydra_Software_Devel/23   9/26/11 2:05p jtna
56 * SW7420-2072: run-time check that transfers do not corrupt MMD
57 * descriptor memory locations
58 *
59 * Hydra_Software_Devel/22   9/14/11 11:43a jtna
60 * SW7420-2046: add memory bounds
61 *
62 * Hydra_Software_Devel/21   9/6/11 6:12p jtna
63 * SW7429-13: added 7429 support
64 *
65 * Hydra_Software_Devel/20   7/14/11 2:14p jtna
66 * SWDTV-7838: add back missing newline
67 *
68 * Hydra_Software_Devel/19   7/14/11 9:49a jtna
69 * SWDTV-7838: adapt to new BCHP_FIELD_DATA macro
70 *
71 * Hydra_Software_Devel/18   7/7/11 10:55a jtna
72 * SW7422-416: update desc_dump debug output
73 *
74 * Hydra_Software_Devel/17   7/6/11 9:41a jtna
75 * SW7425-678: merge new impl
76 *
77 * Hydra_Software_Devel/16   6/13/11 2:59p jtna
78 * SW7422-416: update debug msgs
79 *
80 * Hydra_Software_Devel/15   6/2/11 3:36p jtna
81 * SWBLURAY-26038: include correct header for 7640
82 *
83 * Hydra_Software_Devel/14   6/1/11 4:17p jtna
84 * SWBLURAY-26038: add 7640 support
85 *
86 * Hydra_Software_Devel/13   5/19/11 11:27a jtna
87 * SW7408-280: 7408 has key size of 8
88 *
89 * Hydra_Software_Devel/12   5/19/11 11:13a jtna
90 * SW7550-742: comment out BDBG_MSG_TRACE
91 *
92 * Hydra_Software_Devel/11   5/12/11 12:31p jtna
93 * SW7550-742: merge SHARF support
94 *
95 * Hydra_Software_Devel/10   4/29/11 4:23p jtna
96 * SW7422-416: update comments
97 *
98 * Hydra_Software_Devel/9   4/25/11 11:10a jtna
99 * SW7422-416: more handling of RDB variations
100 *
101 * Hydra_Software_Devel/8   4/22/11 5:53p jtna
102 * SW7422-435: handle SG crypto -> clear transition for 65nm platforms
103 *
104 * Hydra_Software_Devel/7   4/22/11 4:29p jtna
105 * SW7422-416: remove unneeded irqEnabled
106 *
107 * Hydra_Software_Devel/6   4/22/11 3:42p jtna
108 * SW7422-434: add handling of race condition
109 *
110 * Hydra_Software_Devel/5   4/19/11 2:26p jtna
111 * SW7422-416: register correct interrupt for second DMA HW block
112 *
113 * Hydra_Software_Devel/4   4/19/11 12:07p jtna
114 * SW7422-416: SG_ENABLE must only be 1 between START and END
115 *
116 * Hydra_Software_Devel/3   4/19/11 10:30a jtna
117 * SW7422-416: added BCHP_INT_ID_BSP_CONTROL_INTR2_MEM_DMA_0_INTR
118 *
119 * Hydra_Software_Devel/2   4/15/11 5:49p jtna
120 * SW7422-416: merge to main
121 *
122 ***************************************************************************/
123
124#include "bmmd.h"
125#include "blst_squeue.h"
126#include "blst_slist.h"
127#include "bchp_common.h"
128
129#if BCHP_PWR_SUPPORT
130#include "bchp_pwr.h"
131#endif
132
133/* MEM_DMA HW */
134#if BCHP_CHIP==3548 || BCHP_CHIP==3556 || BCHP_CHIP==3563 || BCHP_CHIP==7118 || \
135    BCHP_CHIP==7401 || BCHP_CHIP==7403 || BCHP_CHIP==7440 || BCHP_CHIP==7601 || \
136    BCHP_CHIP==7630 || BCHP_CHIP==7635
137#define BMMD_P_NUM_DMA_ENGINES 1
138#include "bchp_mem_dma.h"
139#elif BCHP_CHIP==7400 || BCHP_CHIP==7435 /* 7400 A0 has one DMA engine but B0 has two. no support for 7400 A0 */
140#define BMMD_P_NUM_DMA_ENGINES 2
141#include "bchp_mem_dma_0.h"
142#include "bchp_mem_dma_1.h"
143#elif BCHP_CHIP==7550 /* 7550 is the only known instance of no MEM_DMA HW */
144#define BMMD_P_NUM_DMA_ENGINES 0
145#else
146#define BMMD_P_NUM_DMA_ENGINES 1
147#include "bchp_mem_dma_0.h"
148#endif
149
150/* SHARF_DMA HW */
151#if ((BCHP_CHIP==7550) && (BCHP_VER>=BCHP_VER_B0)) || BCHP_CHIP==7640 || \
152      BCHP_CHIP==35125 || ((BCHP_CHIP==35230) && (BCHP_VER>=BCHP_VER_C0)) || \
153      BCHP_CHIP==35233
154#define BMMD_P_NUM_SHARF_DMA_ENGINES 2
155#include "bchp_sharf_mem_dma0.h"
156#include "bchp_sharf_mem_dma1.h"
157/* 7440 C2 has 2 SHARF and 1 DMA. 7630 B0 has 2 SHARF and 1 DMA. No SHARF support for these until requested */
158#else
159#define BMMD_P_NUM_SHARF_DMA_ENGINES 0
160#endif
161
162#if (BCHP_CHIP==7550) && (BCHP_VER>=BCHP_VER_B0)
163#define BMMD_P_HAS_LMC_CORE 1
164#include "bchp_sharf_lmc_core.h"
165#endif
166
167/* MEM_DMA interrupt */
168#if BCHP_CHIP==7125 || BCHP_CHIP==7231 || BCHP_CHIP==7340 || BCHP_CHIP==7342 || \
169    BCHP_CHIP==7344 || BCHP_CHIP==7346 || BCHP_CHIP==7358 || BCHP_CHIP==7420 || \
170    BCHP_CHIP==7422 || BCHP_CHIP==7425 || BCHP_CHIP==7429 || BCHP_CHIP==7435 || \
171    BCHP_CHIP==7468 || BCHP_CHIP==7552 || BCHP_CHIP==7640
172#include "bchp_int_id_bsp_control_intr2.h"
173#elif BCHP_CHIP==7400 || BCHP_CHIP==7405
174#include "bchp_int_id_memc16_gfx_l2.h"
175#elif BCHP_CHIP==7325 || BCHP_CHIP==7335 || BCHP_CHIP==7336
176#include "bchp_int_id_graphics_l2.h"
177#elif BCHP_CHIP==7408 || BCHP_CHIP==7440 || BCHP_CHIP==3563 || BCHP_CHIP==35230 || \
178      BCHP_CHIP==35125 || BCHP_CHIP==35233
179#include "bchp_int_id_xpt_bus_if.h"
180#elif BCHP_CHIP==3548 || BCHP_CHIP==3556
181#include "bchp_int_id_gfx_l2.h"
182#else
183#include "bchp_int_id_hif_intr2.h"
184#endif
185
186/* SHARF_DMA interrupt */
187#if BMMD_P_NUM_SHARF_DMA_ENGINES
188#if BCHP_CHIP==7640
189#include "bchp_int_id_hif_intr2.h"
190#else
191#include "bchp_int_id_sharf_intr2.h"
192#endif
193#endif
194
195/* non-40nm platform */
196#if BCHP_CHIP==7038 || BCHP_CHIP==7118 || BCHP_CHIP==7125 || BCHP_CHIP==7325 || \
197    BCHP_CHIP==7335 || BCHP_CHIP==7336 || BCHP_CHIP==7340 || BCHP_CHIP==7342 || \
198    BCHP_CHIP==7400 || BCHP_CHIP==7401 || BCHP_CHIP==7403 || BCHP_CHIP==7405 || \
199    BCHP_CHIP==7408 || BCHP_CHIP==7420 || BCHP_CHIP==7438 || BCHP_CHIP==7440 || \
200    BCHP_CHIP==7468 || BCHP_CHIP==7550 || BCHP_CHIP==7601 || BCHP_CHIP==7630 || \
201    BCHP_CHIP==7635 || BCHP_CHIP==7640 || \
202    BCHP_CHIP==3548 || BCHP_CHIP==3556 || BCHP_CHIP==3560 || BCHP_CHIP==3563 || \
203    BCHP_CHIP==35125 || BCHP_CHIP==35130 || BCHP_CHIP==35230 || BCHP_CHIP==35233
204#define BMMD_P_DMA_REV_2A 0
205#else
206#define BMMD_P_DMA_REV_2A 1
207#endif
208
209#ifndef BCHP_MEM_DMA_0_FIRST_DESC
210    #ifdef BCHP_MEM_DMA_FIRST_DESC /* name difference */
211    #define BCHP_MEM_DMA_0_FIRST_DESC BCHP_MEM_DMA_FIRST_DESC
212    #define BCHP_MEM_DMA_0_CTRL BCHP_MEM_DMA_CTRL
213    #define BCHP_MEM_DMA_0_WAKE_CTRL BCHP_MEM_DMA_WAKE_CTRL
214    #define BCHP_MEM_DMA_0_STATUS BCHP_MEM_DMA_STATUS
215    #define BCHP_MEM_DMA_0_CUR_DESC BCHP_MEM_DMA_CUR_DESC
216    #define BCHP_MEM_DMA_0_CUR_BYTE BCHP_MEM_DMA_CUR_BYTE
217    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Idle BCHP_MEM_DMA_STATUS_DMA_STATUS_Idle
218    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Busy BCHP_MEM_DMA_STATUS_DMA_STATUS_Busy
219    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Sleep BCHP_MEM_DMA_STATUS_DMA_STATUS_Sleep
220    #elif (defined BCHP_SHARF_MEM_DMA0_FIRST_DESC) /* SHARF-only platforms */
221    #define BCHP_MEM_DMA_0_FIRST_DESC BCHP_SHARF_MEM_DMA0_FIRST_DESC
222    #define BCHP_MEM_DMA_0_CTRL BCHP_SHARF_MEM_DMA0_CTRL
223    #define BCHP_MEM_DMA_0_WAKE_CTRL BCHP_SHARF_MEM_DMA0_WAKE_CTRL
224    #define BCHP_MEM_DMA_0_STATUS BCHP_SHARF_MEM_DMA0_STATUS
225    #define BCHP_MEM_DMA_0_CUR_DESC BCHP_SHARF_MEM_DMA0_CUR_DESC
226    #define BCHP_MEM_DMA_0_CUR_BYTE BCHP_SHARF_MEM_DMA0_CUR_BYTE
227    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Idle BCHP_SHARF_MEM_DMA0_STATUS_DMA_STATUS_Idle
228    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Busy BCHP_SHARF_MEM_DMA0_STATUS_DMA_STATUS_Busy
229    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Sleep BCHP_SHARF_MEM_DMA0_STATUS_DMA_STATUS_Sleep
230    #endif
231#endif
232
233#ifndef BCHP_INT_ID_MEM_DMA_0_INTR
234    #ifdef BCHP_INT_ID_MEM_DMA_INTR
235    #define BCHP_INT_ID_MEM_DMA_0_INTR BCHP_INT_ID_MEM_DMA_INTR
236    #elif defined BCHP_INT_ID_XPT_BUS_IF_MEM_DMA_INTR
237    #define BCHP_INT_ID_MEM_DMA_0_INTR BCHP_INT_ID_XPT_BUS_IF_MEM_DMA_INTR
238    #elif defined BCHP_INT_ID_BSP_CONTROL_INTR2_MEM_DMA_0_INTR
239    #define BCHP_INT_ID_MEM_DMA_0_INTR BCHP_INT_ID_BSP_CONTROL_INTR2_MEM_DMA_0_INTR
240    #elif defined BCHP_INT_ID_MEM_DMA_INT
241    #define BCHP_INT_ID_MEM_DMA_0_INTR BCHP_INT_ID_MEM_DMA_INT
242    #elif (BMMD_P_NUM_DMA_ENGINES==0)
243    #define BCHP_INT_ID_MEM_DMA_0_INTR 0
244    #endif
245#endif
246
247#ifndef BCHP_INT_ID_MEM_DMA_1_INTR
248#define BCHP_INT_ID_MEM_DMA_1_INTR 0
249#endif
250
251#ifndef BCHP_INT_ID_SHARF_MEM_DMA0_DONE
252#define BCHP_INT_ID_SHARF_MEM_DMA0_DONE 0
253#endif
254
255#ifndef BCHP_INT_ID_SHARF_MEM_DMA1_DONE
256#define BCHP_INT_ID_SHARF_MEM_DMA1_DONE 0
257#endif
258
259/* register masks and shifts */
260#ifndef BCHP_MEM_DMA_0_FIRST_DESC_ADDR_SHIFT
261    #ifdef BCHP_MEM_DMA_FIRST_DESC_ADDR_SHIFT /* name difference */
262    #define BCHP_MEM_DMA_0_FIRST_DESC_ADDR_MASK      BCHP_MEM_DMA_FIRST_DESC_ADDR_MASK
263    #define BCHP_MEM_DMA_0_FIRST_DESC_ADDR_SHIFT     BCHP_MEM_DMA_FIRST_DESC_ADDR_SHIFT
264    #define BCHP_MEM_DMA_0_CTRL_RUN_MASK             BCHP_MEM_DMA_CTRL_RUN_MASK
265    #define BCHP_MEM_DMA_0_CTRL_RUN_SHIFT            BCHP_MEM_DMA_CTRL_RUN_SHIFT
266    #define BCHP_MEM_DMA_0_WAKE_CTRL_WAKE_MODE_MASK  BCHP_MEM_DMA_WAKE_CTRL_WAKE_MODE_MASK
267    #define BCHP_MEM_DMA_0_WAKE_CTRL_WAKE_MODE_SHIFT BCHP_MEM_DMA_WAKE_CTRL_WAKE_MODE_SHIFT
268    #define BCHP_MEM_DMA_0_WAKE_CTRL_WAKE_MASK       BCHP_MEM_DMA_WAKE_CTRL_WAKE_MASK
269    #define BCHP_MEM_DMA_0_WAKE_CTRL_WAKE_SHIFT      BCHP_MEM_DMA_WAKE_CTRL_WAKE_SHIFT
270    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_MASK    BCHP_MEM_DMA_STATUS_DMA_STATUS_MASK
271    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_SHIFT   BCHP_MEM_DMA_STATUS_DMA_STATUS_SHIFT
272    #elif (defined BCHP_SHARF_MEM_DMA0_FIRST_DESC_ADDR_SHIFT) /* SHARF-only platforms */
273    #define BCHP_MEM_DMA_0_FIRST_DESC_ADDR_MASK      BCHP_SHARF_MEM_DMA0_FIRST_DESC_ADDR_MASK
274    #define BCHP_MEM_DMA_0_FIRST_DESC_ADDR_SHIFT     BCHP_SHARF_MEM_DMA0_FIRST_DESC_ADDR_SHIFT
275    #define BCHP_MEM_DMA_0_CTRL_RUN_MASK             BCHP_SHARF_MEM_DMA0_CTRL_RUN_MASK
276    #define BCHP_MEM_DMA_0_CTRL_RUN_SHIFT            BCHP_SHARF_MEM_DMA0_CTRL_RUN_SHIFT
277    #define BCHP_MEM_DMA_0_WAKE_CTRL_WAKE_MODE_MASK  BCHP_SHARF_MEM_DMA0_WAKE_CTRL_WAKE_MODE_MASK
278    #define BCHP_MEM_DMA_0_WAKE_CTRL_WAKE_MODE_SHIFT BCHP_SHARF_MEM_DMA0_WAKE_CTRL_WAKE_MODE_SHIFT
279    #define BCHP_MEM_DMA_0_WAKE_CTRL_WAKE_MASK       BCHP_SHARF_MEM_DMA0_WAKE_CTRL_WAKE_MASK
280    #define BCHP_MEM_DMA_0_WAKE_CTRL_WAKE_SHIFT      BCHP_SHARF_MEM_DMA0_WAKE_CTRL_WAKE_SHIFT
281    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_MASK    BCHP_SHARF_MEM_DMA0_STATUS_DMA_STATUS_MASK
282    #define BCHP_MEM_DMA_0_STATUS_DMA_STATUS_SHIFT   BCHP_SHARF_MEM_DMA0_STATUS_DMA_STATUS_SHIFT
283    #endif
284#endif
285
286/* descriptor word definitions */
287#ifndef BCHP_MEM_DMA_DESC_WORD0_READ_ADDR_SHIFT /* SHARF-only platforms */
288#define BCHP_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_LITTLE_ENDIAN BCHP_SHARF_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_LITTLE_ENDIAN
289#define BCHP_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_BIG_ENDIAN BCHP_SHARF_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_BIG_ENDIAN
290#define BCHP_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_WORD_ALIGNED BCHP_SHARF_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_WORD_ALIGNED
291#define BCHP_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_HALF_WORD_ALIGNED BCHP_SHARF_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_HALF_WORD_ALIGNED
292#define BCHP_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_BYTE_ALIGNED BCHP_SHARF_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_BYTE_ALIGNED
293
294#define BCHP_MEM_DMA_DESC_WORD0_READ_ADDR_MASK BCHP_SHARF_MEM_DMA_DESC_WORD0_READ_ADDR_MASK
295#define BCHP_MEM_DMA_DESC_WORD0_READ_ADDR_SHIFT BCHP_SHARF_MEM_DMA_DESC_WORD0_READ_ADDR_SHIFT
296#define BCHP_MEM_DMA_DESC_WORD1_WRITE_ADDR_MASK BCHP_SHARF_MEM_DMA_DESC_WORD1_WRITE_ADDR_MASK
297#define BCHP_MEM_DMA_DESC_WORD1_WRITE_ADDR_SHIFT BCHP_SHARF_MEM_DMA_DESC_WORD1_WRITE_ADDR_SHIFT
298#define BCHP_MEM_DMA_DESC_WORD2_TRANSFER_SIZE_MASK BCHP_SHARF_MEM_DMA_DESC_WORD2_TRANSFER_SIZE_MASK
299#define BCHP_MEM_DMA_DESC_WORD2_TRANSFER_SIZE_SHIFT BCHP_SHARF_MEM_DMA_DESC_WORD2_TRANSFER_SIZE_SHIFT
300#define BCHP_MEM_DMA_DESC_WORD2_INTR_ENABLE_MASK BCHP_SHARF_MEM_DMA_DESC_WORD2_INTR_ENABLE_MASK
301#define BCHP_MEM_DMA_DESC_WORD2_INTR_ENABLE_SHIFT BCHP_SHARF_MEM_DMA_DESC_WORD2_INTR_ENABLE_SHIFT
302#define BCHP_MEM_DMA_DESC_WORD2_LAST_MASK BCHP_SHARF_MEM_DMA_DESC_WORD2_LAST_MASK
303#define BCHP_MEM_DMA_DESC_WORD2_LAST_SHIFT BCHP_SHARF_MEM_DMA_DESC_WORD2_LAST_SHIFT
304#define BCHP_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_MASK BCHP_SHARF_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_MASK
305#define BCHP_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_SHIFT BCHP_SHARF_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_SHIFT
306#define BCHP_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_MASK BCHP_SHARF_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_MASK
307#define BCHP_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_SHIFT BCHP_SHARF_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_SHIFT
308#define BCHP_MEM_DMA_DESC_WORD3_NEXT_DESC_ADDR_MASK BCHP_SHARF_MEM_DMA_DESC_WORD3_NEXT_DESC_ADDR_MASK
309#define BCHP_MEM_DMA_DESC_WORD3_NEXT_DESC_ADDR_SHIFT BCHP_SHARF_MEM_DMA_DESC_WORD3_NEXT_DESC_ADDR_SHIFT
310/* WORD4 is not common between DMA and SHARF */
311
312#define BCHP_MEM_DMA_DESC_WORD4_KEY_SELECT_MASK      0
313#define BCHP_MEM_DMA_DESC_WORD4_KEY_SELECT_SHIFT     0
314#define BCHP_MEM_DMA_DESC_WORD4_MODE_SEL_MASK        0
315#define BCHP_MEM_DMA_DESC_WORD4_MODE_SEL_SHIFT       0
316#define BCHP_MEM_DMA_DESC_WORD4_ENC_DEC_INIT_MASK    0
317#define BCHP_MEM_DMA_DESC_WORD4_ENC_DEC_INIT_SHIFT   0
318#define BCHP_MEM_DMA_DESC_WORD4_SG_ENABLE_MASK       0
319#define BCHP_MEM_DMA_DESC_WORD4_SG_ENABLE_SHIFT      0
320#define BCHP_MEM_DMA_DESC_WORD4_SG_SCRAM_START_SHIFT 0
321#define BCHP_MEM_DMA_DESC_WORD4_SG_SCRAM_START_MASK  0
322#define BCHP_MEM_DMA_DESC_WORD4_SG_SCRAM_END_SHIFT   0
323#define BCHP_MEM_DMA_DESC_WORD4_SG_SCRAM_END_MASK    0
324#endif
325
326#ifndef BCHP_SHARF_MEM_DMA_DESC_WORD4_KEY_PRESENT_SHIFT /* no-SHARF platforms */
327#define BCHP_SHARF_MEM_DMA_DESC_WORD4_KEY_PRESENT_SHIFT    0
328#define BCHP_SHARF_MEM_DMA_DESC_WORD4_KEY_PRESENT_MASK     0
329#define BCHP_SHARF_MEM_DMA_DESC_WORD4_DIGEST_PRESENT_SHIFT 0
330#define BCHP_SHARF_MEM_DMA_DESC_WORD4_DIGEST_PRESENT_MASK  0
331#define BCHP_SHARF_MEM_DMA_DESC_WORD4_USE_BSP_KEY_SHIFT    0
332#define BCHP_SHARF_MEM_DMA_DESC_WORD4_USE_BSP_KEY_MASK     0
333#define BCHP_SHARF_MEM_DMA_DESC_WORD4_MODE_SEL_SHIFT       0
334#define BCHP_SHARF_MEM_DMA_DESC_WORD4_MODE_SEL_MASK        0
335#define BCHP_SHARF_MEM_DMA_DESC_WORD4_SG_ENABLE_SHIFT      0
336#define BCHP_SHARF_MEM_DMA_DESC_WORD4_SG_ENABLE_MASK       0
337#define BCHP_SHARF_MEM_DMA_DESC_WORD4_SG_SCRAM_START_SHIFT 0
338#define BCHP_SHARF_MEM_DMA_DESC_WORD4_SG_SCRAM_START_MASK  0
339#define BCHP_SHARF_MEM_DMA_DESC_WORD4_SG_SCRAM_END_SHIFT   0
340#define BCHP_SHARF_MEM_DMA_DESC_WORD4_SG_SCRAM_END_MASK    0
341#define BCHP_SHARF_MEM_DMA_DESC_WORD4_CONTEXT_NUM_SHIFT    0
342#define BCHP_SHARF_MEM_DMA_DESC_WORD4_CONTEXT_NUM_MASK     0
343#endif
344
345/* this came from BDMA_P_MEM_KEY_SIZE in bdma_mem_priv.h.
346   it's not used in SHARF, and appears to be a DTV HW quirk */
347#if BCHP_CHIP==35125 || BCHP_CHIP==35230 || BCHP_CHIP==35233 || BCHP_CHIP==7408
348#define BMMD_P_KEY_SIZE 8
349#else
350#define BMMD_P_KEY_SIZE 6
351#endif
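/* note: KEY_SELECT in descriptor word 4 is computed as keyslot*BMMD_P_KEY_SIZE in
   BMMD_Context_P_SetSettings(), so e.g. keyslot 2 selects key entry 16 on the
   key-size-8 DTV parts above and entry 12 elsewhere */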
352
353/* magic number written to DESC_WORD5 to indicate QMD completion. 'BMMD' in ASCII */
354#define BMMD_P_COMPLETION_CODE 0x424d4d44
355
356BDBG_MODULE(bmmd);
357BDBG_OBJECT_ID(BMMD_Handle_Tag);
358BDBG_OBJECT_ID(BMMD_Context);
359
360#define BMMD_P_DESC_PROTECT 1 /* run-time check for transfers that corrupt descriptor memory location */
361
362#define BDBG_MSG_TRACE(x) /*BDBG_MSG(x)*/
363#define BMMD_DEBUG_MODE 0 /* extra asserts for sanity checks */
364
365void BMMD_Context_P_DescDump(BMMD_ContextHandle ctx, unsigned numBlocks);
366
367typedef struct BMMD_Context
368{
369    BDBG_OBJECT(BMMD_Context)
370    BMMD_Handle parent;   
371    uint32_t dmaDesc_3; /* prototype of dmaDesc[3] */
372    uint32_t dmaDesc_4; /* prototype of dmaDesc[4] */
373    void *firstDesc;          /* pointer to first descriptor */
374    uint32_t firstDescOffset; /* physical address of first descriptor */
375    BMMD_ContextSettings settings;
376
377    BLST_S_ENTRY(BMMD_Context) ctxNode;
378    BLST_SQ_ENTRY(BMMD_Context) activeNode;
379    enum {
380        BMMD_Context_P_State_eIdle, 
381        BMMD_Context_P_State_eQueued,   /* ctx is queued in activeNode and hardware */
382        BMMD_Context_P_State_eDestroyed /* Context_Destroy() was called */
383    } state;
384
385    volatile uint32_t *lastDesc; /* address of last descriptor that is used to mark the end of the chain */
386    uint32_t lastDescOffset;     /* physical address of last descriptor */
387    unsigned dmaLength; /* total length (in bytes) of DMA transaction */
388    bool sg; /* true if context has sgScram descriptor(s) */
389} BMMD_Context;
390
391typedef struct BMMD_Handle_Tag
392{
393    BDBG_OBJECT(BMMD_Handle_Tag)
394    BCHP_Handle chp;
395    BREG_Handle reg;
396    BMEM_Handle mem;
397    BINT_Handle bint;
398    BINT_CallbackHandle irq;
399    uint32_t baseRegOffset; /* 0 if using HW index 0 */
400    BMMD_Settings settings;
401    uint32_t *qmdConst; /* mem location that holds constant value to indicate QMD completion */
402    uint32_t qmdConstOffset;
403    BMMD_ContextHandle lastQueuedCtx;
404    unsigned numSgCtx;
405    BLST_S_HEAD(CtxList, BMMD_Context) ctxList;
406    BLST_SQ_HEAD(ActiveCtxList, BMMD_Context) activeCtxList;
407    bool standby;
408} BMMD_Handle_Tag;
409
410#define BMMD_P_DESC_SIZE (4*8)
411#define BMMD_P_MAX_TRANSFER_SIZE (0xFFFFFF)
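/* each descriptor is BMMD_P_DESC_SIZE (32) bytes, i.e. 8 32-bit words; the layout assumed
   by BMMD_Context_P_PrepareBlocks() is:
     word 0: READ_ADDR
     word 1: WRITE_ADDR
     word 2: TRANSFER_SIZE | INTR_ENABLE | LAST
     word 3: READ_ENDIAN_MODE | WRITE_ENDIAN_XLATE_MODE | NEXT_DESC_ADDR
     word 4: crypto controls (MEM_DMA or SHARF variant)
     word 5: scratch location that the trailing QMD overwrites with BMMD_P_COMPLETION_CODE
     words 6-7: unused */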
412
413static void BMMD_P_CompleteCallback_isr(void *pParam1, int parm2);
414
415void BMMD_GetDefaultSettings(
416    BMMD_Settings *pSettings
417    )
418{
419    BDBG_ASSERT(pSettings != NULL);
420    BKNI_Memset(pSettings, 0, sizeof(*pSettings));
421}
422
423#define BMMD_MDEBUG_MODE 0
424#define BMMD_MDEBUG_MEM_ENTRIES 102400
425#define BMMD_MDEBUG_MEM_ENTRY_SIZE 16
426#define BMMD_MDEBUG_WRITE(x) \
427    do { \
428        BDBG_ASSERT(g_current<BMMD_MDEBUG_MEM_ENTRIES*BMMD_MDEBUG_MEM_ENTRY_SIZE); \
429        *((uint32_t*)g_debugMem+g_current) = x; \
430        g_current+=1; } \
431    while(0)
432
433#if BMMD_MDEBUG_MODE
434static void* g_debugMem;
435uint32_t g_debugOffset;
436static unsigned g_current = 0;
437#endif
438
439BERR_Code BMMD_Open(
440    BCHP_Handle hChp, 
441    BREG_Handle hReg, 
442    BMEM_Handle hMem, 
443    BINT_Handle hInt, 
444    const BMMD_Settings *pSettings, 
445    BMMD_Handle *phMmd
446    )
447{
448    BMMD_Handle pMmd = NULL;
449    BERR_Code rc;
450    uint32_t intr;
451   
452    BDBG_ASSERT(hChp);
453    BDBG_ASSERT(hReg);
454    BDBG_ASSERT(hMem);
455    BDBG_ASSERT(hInt);
456    BDBG_ASSERT(pSettings);
457
458    BDBG_ASSERT(pSettings->engineType<BMMD_EngineType_eMax);
459#if (BMMD_P_NUM_SHARF_DMA_ENGINES && BMMD_P_NUM_DMA_ENGINES)
460    /* for DMA-only or SHARF-only platforms, this is not needed.
461       but for platforms with both, SHARF register access relies on this */
462    BDBG_CASSERT(BCHP_SHARF_MEM_DMA0_CTRL     -BCHP_SHARF_MEM_DMA0_FIRST_DESC == BCHP_MEM_DMA_0_CTRL     -BCHP_MEM_DMA_0_FIRST_DESC);
463    BDBG_CASSERT(BCHP_SHARF_MEM_DMA0_WAKE_CTRL-BCHP_SHARF_MEM_DMA0_FIRST_DESC == BCHP_MEM_DMA_0_WAKE_CTRL-BCHP_MEM_DMA_0_FIRST_DESC);
464    BDBG_CASSERT(BCHP_SHARF_MEM_DMA0_STATUS   -BCHP_SHARF_MEM_DMA0_FIRST_DESC == BCHP_MEM_DMA_0_STATUS   -BCHP_MEM_DMA_0_FIRST_DESC);
465    BDBG_CASSERT(BCHP_SHARF_MEM_DMA0_CUR_DESC -BCHP_SHARF_MEM_DMA0_FIRST_DESC == BCHP_MEM_DMA_0_CUR_DESC -BCHP_MEM_DMA_0_FIRST_DESC);
466    BDBG_CASSERT(BCHP_SHARF_MEM_DMA0_CUR_BYTE -BCHP_SHARF_MEM_DMA0_FIRST_DESC == BCHP_MEM_DMA_0_CUR_BYTE -BCHP_MEM_DMA_0_FIRST_DESC);
467#endif
468
469    if (pSettings->engineType==BMMD_EngineType_eDma) {
470        if (pSettings->engineIndex>=BMMD_P_NUM_DMA_ENGINES) {
471            rc = BERR_TRACE(BERR_INVALID_PARAMETER);
472            goto error;
473        }
474    }
475    else {
476        if (pSettings->engineIndex>=BMMD_P_NUM_SHARF_DMA_ENGINES) {
477            rc = BERR_TRACE(BERR_INVALID_PARAMETER);
478            goto error;
479        }
480    }
481
482    *phMmd = NULL;
483    pMmd = BKNI_Malloc(sizeof(*pMmd));
484    if (pMmd == NULL) { 
485        rc = BERR_TRACE(BERR_OUT_OF_SYSTEM_MEMORY); 
486        goto error;
487    }
488    BKNI_Memset(pMmd, 0, sizeof(*pMmd));
489    BDBG_OBJECT_SET(pMmd, BMMD_Handle_Tag);
490
491    pMmd->chp = hChp;
492    pMmd->reg = hReg;
493    pMmd->mem = hMem;
494    pMmd->bint = hInt;
495    pMmd->settings = *pSettings;
496
497    pMmd->baseRegOffset = 0;
498    /* baseRegOffset abstracts the HW address (DMA_0/DMA_1/SHARF_0/SHARF_1) differences.
499       DMA only platforms:      BCHP_MEM_DMA_0_* points to itself
500       SHARF only platforms:    BCHP_MEM_DMA_0_* is defined to point to BCHP_SHARF_MEM_DMA0_*
501       DMA and SHARF platforms: BCHP_MEM_DMA_0_* points to itself
502                                SHARF requires a special offset */
503#if (BMMD_P_NUM_DMA_ENGINES==2)
504    if (pSettings->engineType==BMMD_EngineType_eDma && pSettings->engineIndex==1) {
505        pMmd->baseRegOffset = BCHP_MEM_DMA_1_FIRST_DESC-BCHP_MEM_DMA_0_FIRST_DESC;
506    }
507#endif
508#if (BMMD_P_NUM_DMA_ENGINES && BMMD_P_NUM_SHARF_DMA_ENGINES)
509    if (pSettings->engineType==BMMD_EngineType_eSharf) {
510        pMmd->baseRegOffset = BCHP_SHARF_MEM_DMA0_FIRST_DESC-BCHP_MEM_DMA_0_FIRST_DESC;
511    }
512#endif
513#if (BMMD_P_NUM_SHARF_DMA_ENGINES==2)
514    if (pSettings->engineType==BMMD_EngineType_eSharf && pSettings->engineIndex==1) {
515        pMmd->baseRegOffset += (BCHP_SHARF_MEM_DMA1_FIRST_DESC-BCHP_SHARF_MEM_DMA0_FIRST_DESC);
516    }
517#endif
518    BDBG_MSG_TRACE(("%#lx: baseRegOffset %#lx", pMmd, pMmd->baseRegOffset));
519   
520    BLST_S_INIT(&pMmd->ctxList);
521    BLST_SQ_INIT(&pMmd->activeCtxList);
522
523#ifdef BCHP_PWR_RESOURCE_DMA
524    BCHP_PWR_AcquireResource(pMmd->chp, BCHP_PWR_RESOURCE_DMA);
525#endif
526    BREG_Write32(pMmd->reg, pMmd->baseRegOffset+BCHP_MEM_DMA_0_CTRL, 
527        BCHP_FIELD_DATA(MEM_DMA_0_CTRL, RUN, false)); /* stop DMA on open */
528    BREG_Write32(pMmd->reg, pMmd->baseRegOffset+BCHP_MEM_DMA_0_FIRST_DESC,
529        BCHP_FIELD_DATA(MEM_DMA_0_FIRST_DESC, ADDR, 0));
530    BREG_Write32(pMmd->reg, pMmd->baseRegOffset+BCHP_MEM_DMA_0_WAKE_CTRL,
531        BCHP_FIELD_DATA(MEM_DMA_0_WAKE_CTRL, WAKE_MODE, 1)); /* always use wake_from_last */
532
533#if BMMD_P_HAS_LMC_CORE
534    BREG_Write32(pMmd->reg, BCHP_SHARF_LMC_CORE_SWITCH_MEMC, 
535        BCHP_FIELD_DATA(SHARF_LMC_CORE_SWITCH_MEMC, MUX_SELECT, 1)); /* always point to MEMC, not LMC */
536#endif
537#ifdef BCHP_PWR_RESOURCE_DMA
538    BCHP_PWR_ReleaseResource(pMmd->chp, BCHP_PWR_RESOURCE_DMA);
539#endif
540
541    intr = 0;
542    if (pSettings->engineType==BMMD_EngineType_eDma) {
543        intr = (pSettings->engineIndex==0)?BCHP_INT_ID_MEM_DMA_0_INTR:BCHP_INT_ID_MEM_DMA_1_INTR;
544    }
545    else {
546        intr = (pSettings->engineIndex==0)?BCHP_INT_ID_SHARF_MEM_DMA0_DONE:BCHP_INT_ID_SHARF_MEM_DMA1_DONE;
547    }
548    BDBG_ASSERT(intr);
549
550    rc = BINT_CreateCallback(&pMmd->irq, hInt, intr, BMMD_P_CompleteCallback_isr, pMmd, 0);
551    if (rc != BERR_SUCCESS) { rc = BERR_TRACE(rc); goto error; }
552    rc = BINT_EnableCallback_isr(pMmd->irq);
553    if (rc != BERR_SUCCESS) { rc = BERR_TRACE(rc); goto error; }
554
555    pMmd->qmdConst = BMEM_AllocAligned(pMmd->mem, 4, 0, 0);
556    if (pMmd->qmdConst == NULL) { rc = BERR_TRACE(BERR_OUT_OF_DEVICE_MEMORY); goto error; }
557    rc = BMEM_ConvertAddressToOffset(pMmd->mem, pMmd->qmdConst, &pMmd->qmdConstOffset);
558    if (rc!=BERR_SUCCESS) { rc = BERR_TRACE(rc); goto error; }
559    BDBG_MSG_TRACE(("%#lx: QMD source offset %#lx", pMmd, pMmd->qmdConstOffset));
560    *pMmd->qmdConst = BMMD_P_COMPLETION_CODE;
561
562#if BMMD_MDEBUG_MODE
563    g_debugMem = BMEM_AllocAligned(pMmd->mem, BMMD_MDEBUG_MEM_ENTRY_SIZE*BMMD_MDEBUG_MEM_ENTRIES, 5, 0);
564    BDBG_ASSERT(g_debugMem);
565    rc = BMEM_ConvertAddressToOffset(pMmd->mem, g_debugMem, &g_debugOffset);
566    BKNI_Printf("g_debugMem %x\n", g_debugOffset);
567    BKNI_Memset(g_debugMem, 0, BMMD_MDEBUG_MEM_ENTRY_SIZE*BMMD_MDEBUG_MEM_ENTRIES);
568#endif
569
570    *phMmd = pMmd;
571    return BERR_SUCCESS;
572
573error:
574    if (pMmd != NULL) {
575        if (pMmd->irq) {
576            BINT_DestroyCallback(pMmd->irq);
577        }
578        BMEM_Free(pMmd->mem, pMmd->qmdConst);
579        BKNI_Free(pMmd);
580        *phMmd = NULL;
581    }
582    return rc;
583}
584
585void BMMD_Context_GetDefaultSettings(
586    BMMD_ContextSettings *pSettings
587    )
588{
589    BDBG_ASSERT(pSettings != NULL);
590    BKNI_Memset(pSettings, 0, sizeof(*pSettings));
591    pSettings->maxNumBlocks = 1;
592}
593         
594void BMMD_Context_GetDefaultBlockSettings(
595    BMMD_ContextBlockSettings *pSettings
596    )
597{
598    BDBG_ASSERT(pSettings != NULL);
599    BKNI_Memset(pSettings, 0, sizeof(*pSettings));
600}
601
602static BERR_Code BMMD_Context_P_SetSettings(
603    BMMD_ContextHandle ctx, 
604    const BMMD_ContextSettings *pSettings)
605{
606    unsigned readEndian, swapMode;
607    BMMD_EngineType engineType = ctx->parent->settings.engineType;
608    BDBG_OBJECT_ASSERT(ctx, BMMD_Context);
609
610    /* this only affects non-word aligned data */
611    switch (pSettings->endianMode) {
612        case BMMD_EndianMode_eLittle:
613            readEndian = BCHP_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_LITTLE_ENDIAN;
614            break;
615        case BMMD_EndianMode_eBig:
616            readEndian = BCHP_MEM_DMA_DESC_WORD3_READ_ENDIAN_MODE_BIG_ENDIAN;
617            break;
618        default:
619            return BERR_TRACE(BERR_INVALID_PARAMETER);
620    }
621    switch (pSettings->swapMode) {
622        case BMMD_SwapMode_eNone:
623            swapMode = BCHP_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_WORD_ALIGNED;
624            break;
625        case BMMD_SwapMode_eWord:
626            swapMode = BCHP_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_HALF_WORD_ALIGNED;
627            break;
628        case BMMD_SwapMode_eByte:
629            swapMode = BCHP_MEM_DMA_DESC_WORD3_WRITE_ENDIAN_XLATE_MODE_BYTE_ALIGNED;
630            break;
631        default:
632            return BERR_TRACE(BERR_INVALID_PARAMETER);
633    }
634    ctx->dmaDesc_3 = 
635        BCHP_FIELD_DATA(MEM_DMA_DESC_WORD3, READ_ENDIAN_MODE,        readEndian) |
636        BCHP_FIELD_DATA(MEM_DMA_DESC_WORD3, WRITE_ENDIAN_XLATE_MODE, swapMode);
637
638    if (engineType==BMMD_EngineType_eDma) {
639        if (pSettings->scramMode >= BMMD_ScramMode_eMax) {
640            return BERR_TRACE(BERR_INVALID_PARAMETER);
641        }
642        ctx->dmaDesc_4 = 
643            BCHP_FIELD_DATA(MEM_DMA_DESC_WORD4, MODE_SEL,   (unsigned)pSettings->scramMode) |
644            BCHP_FIELD_DATA(MEM_DMA_DESC_WORD4, KEY_SELECT, pSettings->keyslot*BMMD_P_KEY_SIZE);
645    }
646    else {
647        unsigned shaContext = 0;
648        if (pSettings->sharf.mode >= BMMD_SharfMode_eMax) {
649            return BERR_TRACE(BERR_INVALID_PARAMETER);
650        }
651        if (pSettings->sharf.shaContext >= 2) {
652            return BERR_TRACE(BERR_INVALID_PARAMETER);
653        }
654        switch (pSettings->sharf.mode) {
655            case BMMD_SharfMode_ePassThrough:
656            case BMMD_SharfMode_eSha1:
657                shaContext = pSettings->sharf.shaContext;
658                break;
659            default:
660                shaContext = 0; /* must be 0 for AES or CMAC */
661                break;
662        }
663
664        ctx->dmaDesc_4 = 
665            BCHP_FIELD_DATA(SHARF_MEM_DMA_DESC_WORD4, USE_BSP_KEY, (unsigned)pSettings->sharf.useBspKey) |
666            BCHP_FIELD_DATA(SHARF_MEM_DMA_DESC_WORD4, MODE_SEL,    (unsigned)pSettings->sharf.mode) |
667            BCHP_FIELD_DATA(SHARF_MEM_DMA_DESC_WORD4, CONTEXT_NUM, shaContext);
668    }
669
670    ctx->settings = *pSettings;
671    return BERR_SUCCESS;
672}
673
674BMMD_ContextHandle BMMD_Context_Create(
675    BMMD_Handle hMmd,
676    const BMMD_ContextSettings *pSettings
677    )
678{
679    BMMD_Context *pCtx;
680    BMMD_ContextSettings ctxSettings;
681    BERR_Code rc;
682
683    BDBG_OBJECT_ASSERT(hMmd, BMMD_Handle_Tag);
684
685    if (pSettings == NULL) {
686        BMMD_Context_GetDefaultSettings(&ctxSettings);
687        pSettings = &ctxSettings;
688    }
689    if (pSettings->maxNumBlocks == 0) { 
690        rc = BERR_TRACE(BERR_INVALID_PARAMETER); 
691        goto err_settings; 
692    }
693    BDBG_ASSERT(pSettings->endianMode < BMMD_EndianMode_eMax);
694    BDBG_ASSERT(pSettings->swapMode < BMMD_SwapMode_eMax);
695    BDBG_ASSERT(pSettings->scramMode < BMMD_ScramMode_eMax);
696
697    pCtx = BKNI_Malloc(sizeof(BMMD_Context));
698    if ( NULL == pCtx ) { rc = BERR_TRACE(BERR_OUT_OF_SYSTEM_MEMORY); goto err_alloc; }
699
700    BKNI_Memset(pCtx, 0, sizeof(BMMD_Context));
701    BDBG_OBJECT_SET(pCtx, BMMD_Context);
702
703    pCtx->parent = hMmd;
704    rc = BMMD_Context_P_SetSettings(pCtx, pSettings);
705    if (rc!=BERR_SUCCESS) { rc = BERR_TRACE(rc); goto err_set_settings; }
706
707    pCtx->firstDesc = BMEM_AllocAligned(hMmd->mem, (1+pCtx->settings.maxNumBlocks)*BMMD_P_DESC_SIZE, 5, 0); /* +1 for QMD. 32 byte aligned */
708    if (pCtx->firstDesc == NULL) { rc = BERR_TRACE(BERR_OUT_OF_DEVICE_MEMORY); goto err_mem_alloc; }
709    rc = BMEM_ConvertAddressToOffset(hMmd->mem, pCtx->firstDesc, &pCtx->firstDescOffset);
710    if (rc!=BERR_SUCCESS) { rc = BERR_TRACE(rc); goto err_mem_alloc; }
711    BDBG_MSG_TRACE(("create: firstDesc %#lx, offset %#lx, blocks %#x", pCtx->firstDesc, pCtx->firstDescOffset, pSettings->maxNumBlocks));
712
713    /* initialize all (maxNumBlocks+1) descriptors */
714    BKNI_Memset(pCtx->firstDesc, 0, (1+pCtx->settings.maxNumBlocks)*BMMD_P_DESC_SIZE);
715#if 0 /* this code is unneeded */
716    unsigned i;
717    uint32_t *descAddr;
718    for (i=0,descAddr=pCtx->firstDesc; i<=pSettings->maxNumBlocks; i++,descAddr+=BMMD_P_DESC_SIZE/sizeof(uint32_t)) {
719        descAddr[2] =
720            BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, TRANSFER_SIZE, 0) |
721            BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, INTR_ENABLE, false) |
722            BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, LAST, true);
723        descAddr[3] =
724            BCHP_FIELD_DATA(MEM_DMA_DESC_WORD3, NEXT_DESC_ADDR, (pCtx->firstDescOffset+BMMD_P_DESC_SIZE)>>BCHP_MEM_DMA_DESC_WORD3_NEXT_DESC_ADDR_SHIFT); /* this always points to the address of the second descriptor */
725    }
726#endif
727    BLST_S_INSERT_HEAD(&hMmd->ctxList, pCtx, ctxNode);
728
729#if 0 /* for debug */
730    BMMD_Context_P_DescDump(pCtx, pCtx->settings.maxNumBlocks); /* second arg added to match the current prototype */
731#endif
732
733#ifdef BCHP_PWR_RESOURCE_DMA
734    BCHP_PWR_AcquireResource(pCtx->parent->chp, BCHP_PWR_RESOURCE_DMA);
735#endif
736    return pCtx;
737
738err_mem_alloc:
739err_set_settings:
740    BKNI_Free(pCtx);
741err_alloc:
742err_settings:
743    return NULL;
744}
745
746BERR_Code BMMD_Context_GetSettings(
747    BMMD_ContextHandle hCtx,
748    BMMD_ContextSettings *pSettings
749    )
750{
751    BDBG_OBJECT_ASSERT(hCtx, BMMD_Context);
752
753    if (hCtx->state == BMMD_Context_P_State_eDestroyed) {
754        return BERR_NOT_SUPPORTED;
755    }
756
757    *pSettings = hCtx->settings;
758    return 0;
759}
760
761BERR_Code BMMD_Context_SetSettings(
762    BMMD_ContextHandle hCtx,
763    const BMMD_ContextSettings *pSettings
764    )
765{
766    BDBG_OBJECT_ASSERT(hCtx, BMMD_Context);
767
768    if (hCtx->state != BMMD_Context_P_State_eIdle) {
769        return BERR_NOT_SUPPORTED;
770    }
771
772    if (pSettings->maxNumBlocks != hCtx->settings.maxNumBlocks) {
773        return BERR_TRACE(BERR_INVALID_PARAMETER);
774    }
775    if (pSettings->memoryBounds.offset != hCtx->settings.memoryBounds.offset ||
776        pSettings->memoryBounds.size != hCtx->settings.memoryBounds.size) {
777        return BERR_TRACE(BERR_INVALID_PARAMETER);
778    }
779   
780    return BMMD_Context_P_SetSettings(hCtx, pSettings);
781}
782
783
784static void
785BMMD_Context_P_NotifyFirst_isr(BMMD_Handle mmd, BMMD_ContextHandle ctx)
786{
787#if BMMD_DEBUG_MODE
788    BDBG_ASSERT(ctx==BLST_SQ_FIRST(&mmd->activeCtxList));
789#endif
790
791#if (!BMMD_P_DMA_REV_2A)
792    if (ctx->sg) {
793        BDBG_ASSERT(mmd->numSgCtx>0);
794        mmd->numSgCtx--;
795        /*BDBG_MSG_TRACE(("%#lx sg %u", ctx->firstDescOffset, mmd->numSgCtx));*/
796    }
797#endif
798   
799    if (ctx->state == BMMD_Context_P_State_eDestroyed) {
800        BLST_SQ_REMOVE_HEAD(&mmd->activeCtxList, activeNode);
801        return;
802    }
803
804    if (ctx->state != BMMD_Context_P_State_eIdle) {
805        ctx->state = BMMD_Context_P_State_eIdle;
806        BLST_SQ_REMOVE_HEAD(&mmd->activeCtxList, activeNode);
807
808        if (ctx->settings.callback_isr) {
809            ctx->settings.callback_isr(ctx->settings.pParm1, ctx->settings.pParm2);
810        }
811    }
812    return;
813}
814
815/* lastDesc is the QMD. go back one descriptor and then go to word 5 */
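/* i.e. step back 8 words (one 32-byte descriptor) from the QMD to reach word 0 of the
   final data descriptor, then index word 5, which the QMD overwrites with the completion code */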
816#if BMMD_P_DMA_REV_2A
817#define BMMD_Context_P_IsComplete(ctx) (*(ctx->lastDesc-8+5) == BMMD_P_COMPLETION_CODE)
818#else
819bool BMMD_Context_P_IsComplete(BMMD_ContextHandle ctx)
820{
821    uint32_t data = BREG_Read32(ctx->parent->reg, ctx->parent->baseRegOffset+BCHP_MEM_DMA_0_STATUS);
822    data = BCHP_GET_FIELD_DATA(data, MEM_DMA_0_STATUS, DMA_STATUS);
823    if (ctx->sg) {
824        if (data==BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Busy) {
825            return false;
826        }
827        else {
828            data = BREG_Read32(ctx->parent->reg, ctx->parent->baseRegOffset+BCHP_MEM_DMA_0_CUR_DESC);
829            return (data==ctx->lastDescOffset);
830        }
831    }
832    else {
833        return (*(ctx->lastDesc-8+5) == BMMD_P_COMPLETION_CODE);
834    }
835    return false;
836}
837#endif
838
839void BMMD_P_DumpActiveCtxStatus(BMMD_Handle mmd)
840{
841    BMMD_ContextHandle ctx;
842    bool complete;
843    for (ctx=BLST_SQ_FIRST(&mmd->activeCtxList); ctx; ctx=BLST_SQ_NEXT(ctx, activeNode)) {
844        complete = BMMD_Context_P_IsComplete(ctx);
845        BKNI_Printf("%#lx %d\n", ctx->firstDescOffset, complete);
846    }
847}
848
849/* this function is only called while HW is in SLEEP */
850static void
851BMMD_P_CheckAll_isr(BMMD_Handle mmd)
852{
853    BMMD_ContextHandle ctx;
854
855    while (NULL!=(ctx=BLST_SQ_FIRST(&mmd->activeCtxList))) {
856        if (BMMD_Context_P_IsComplete(ctx)) {
857            BDBG_MSG_TRACE(("%#lx notify from idle", ctx->firstDescOffset));
858            BMMD_Context_P_NotifyFirst_isr(mmd, ctx);
859        }
860        else {
861#if BMMD_DEBUG_MODE
862#if BMMD_P_DMA_REV_2A
863            BKNI_Fail();
864#endif
865#endif
866
867            BDBG_MSG_TRACE(("%#lx restart", ctx->firstDescOffset));
868
869            BREG_Write32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_CTRL, 
870                BCHP_FIELD_DATA(MEM_DMA_0_CTRL, RUN, false));
871            BREG_Write32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_FIRST_DESC,
872                BCHP_FIELD_DATA(MEM_DMA_0_FIRST_DESC, ADDR, ctx->firstDescOffset));
873            BREG_Write32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_CTRL, 
874                BCHP_FIELD_DATA(MEM_DMA_0_CTRL, RUN, true));
875            break;
876        }
877    }
878    BDBG_MSG_TRACE(("check done"));
879    return;
880}
881
 882/* this function accounts for the fact that we may not receive one interrupt per completed context */
883static void
884BMMD_P_UpdateStatus_isr(BMMD_Handle mmd, bool isr)
885{
886    uint32_t data;
887    BMMD_ContextHandle ctx;
888#if (BMMD_P_DMA_REV_2A)
889    BSTD_UNUSED(isr);
890#endif
891
892    data = BREG_Read32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_STATUS);
893    switch (BCHP_GET_FIELD_DATA(data, MEM_DMA_0_STATUS, DMA_STATUS)) {
894        default:
895        case BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Idle:
896        case BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Sleep:
897            BDBG_MSG_TRACE(("update: idle %u", data)); 
898            BMMD_P_CheckAll_isr(mmd);
899            break;
900        case BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Busy:
901            BDBG_MSG_TRACE(("update: busy")); 
902            while (NULL!=(ctx = BLST_SQ_FIRST(&mmd->activeCtxList))) {
903                if (BMMD_Context_P_IsComplete(ctx)) {
904                    BDBG_MSG_TRACE(("%#lx notify from busy", ctx->firstDescOffset));
905                    BMMD_Context_P_NotifyFirst_isr(mmd, ctx);       
906                }
907                else {
908#if (!BMMD_P_DMA_REV_2A)
909                    /* there appears to be a race condition where, when two sgScram jobs are queued in quick succession,
 910                       an ISR fires while the HW is busy. there are two possibilities:
 911                       a) the first job's ISR fired late, i.e. after the second job was restarted, and the second job's ISR then never fires for whatever reason.
912                       b) the second job's ISR fired while the HW is still busy.
913                       the code below handles this condition */
914                    if (isr && ctx->sg) {
915                        BKNI_Delay(1);
916                        data = BREG_Read32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_STATUS);
917                        if (BCHP_GET_FIELD_DATA(data, MEM_DMA_0_STATUS, DMA_STATUS)==BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Sleep) {
918                            BDBG_MSG(("%#lx UpdateStatus while busy->sleep", ctx->firstDescOffset));
919                            BMMD_P_CheckAll_isr(mmd);
920                        }
921                    }
922#endif
923                    break;
924                }
925            }
926            break;
927    }
928    return;
929}
930
931static void BMMD_P_CompleteCallback_isr(void *pParam1, int parm2)
932{
933    BMMD_Handle mmd = (BMMD_Handle)pParam1;
934    BSTD_UNUSED(parm2);
935
936    BDBG_OBJECT_ASSERT(mmd, BMMD_Handle_Tag);
937    BDBG_MSG_TRACE(("ISR"));
938#if 0
939    BDBG_MSG_TRACE(("CUR_DESC %#lx", BREG_Read32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_CUR_DESC)));
940#endif
941    BMMD_P_UpdateStatus_isr(mmd, true);
942    return;
943}
944
945static BERR_Code
946BMMD_Context_P_Start(BMMD_Handle mmd, BMMD_ContextHandle ctx)
947{
948    uint32_t data;
949    unsigned status;
950    BMMD_ContextHandle lastCtx = mmd->lastQueuedCtx;
951    uint32_t qmdOffset = ctx->lastDescOffset; /* offset of QMD. the magic number is written to word[5] of previous desc */
952    bool empty = (BLST_SQ_FIRST(&mmd->activeCtxList)==NULL);
953
954    BSTD_UNUSED(qmdOffset); /* silence unused variable warning */
955    mmd->lastQueuedCtx = ctx;
956    data = BREG_Read32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_STATUS);
957    status = BCHP_GET_FIELD_DATA(data, MEM_DMA_0_STATUS, DMA_STATUS);
958
959    if (empty || !lastCtx) {
960#if BMMD_DEBUG_MODE
961        BDBG_ASSERT(status != BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Busy); /* empty has to imply idle/sleep */
962        BDBG_ASSERT(mmd->numSgCtx==0);
963#else
964        if (status == BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Busy) {
965            BDBG_WRN(("%#lx start while BUSY", ctx->firstDescOffset));
966            /* activeCtxList may need to be cleared, if we got here via (!lastCtx) */
967        }
968        if (mmd->numSgCtx) {
969            BDBG_WRN(("%#lx start with sg %u", ctx->firstDescOffset, mmd->numSgCtx));
970            mmd->numSgCtx = 0;
971        }
972#endif
973
974        BDBG_MSG_TRACE(("%#lx start  %u, QMD %#lx", ctx->firstDescOffset, status, qmdOffset));
975        BREG_Write32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_CTRL, 
976            BCHP_FIELD_DATA(MEM_DMA_0_CTRL, RUN, false));
977        BREG_Write32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_FIRST_DESC,
978            BCHP_FIELD_DATA(MEM_DMA_0_FIRST_DESC, ADDR, ctx->firstDescOffset));
979        BREG_Write32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_CTRL, 
980            BCHP_FIELD_DATA(MEM_DMA_0_CTRL, RUN, true));
981
982        return BERR_SUCCESS;
983    }
984    else {
985        uint32_t data = lastCtx->dmaDesc_3;
986
987        if (mmd->numSgCtx==0) { /* always true for 40nm platforms */
988            BDBG_MSG_TRACE(("%#lx link   %u, QMD %#lx", ctx->firstDescOffset, status, qmdOffset));
989            /* update NEXT_DESC_ADDR, clear LAST in previous descriptor and hit WAKE, in that order */
990            lastCtx->lastDesc[3] = data | BCHP_FIELD_DATA(MEM_DMA_DESC_WORD3, NEXT_DESC_ADDR, ctx->firstDescOffset>>BCHP_MEM_DMA_DESC_WORD3_NEXT_DESC_ADDR_SHIFT);
991            lastCtx->lastDesc[2] &= ~(BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, LAST, 1));
992            BREG_Write32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_WAKE_CTRL,
993                BCHP_FIELD_DATA(MEM_DMA_0_WAKE_CTRL, WAKE, 1));
994        }
995        else {
996            BDBG_MSG_TRACE(("%#lx postpone, QMD %#lx", ctx->firstDescOffset, qmdOffset));
997        }
998
999        /* mmd->numSgCtx++ takes place in Enqueue() */
1000        return BMMD_QUEUED;
1001    }
1002}
1003
1004static BERR_Code BMMD_Context_P_PrepareBlocks(
1005    BMMD_ContextHandle ctx, 
1006    const BMMD_ContextBlockSettings *pSettings,
1007    unsigned numBlocks
1008    )
1009{
1010    unsigned i;
1011    volatile uint32_t *descAddr;
1012    uint32_t nextDescOffset, data;
1013    unsigned dmaLength;
1014    bool sg;
1015    BMMD_EngineType engineType = ctx->parent->settings.engineType;
1016    BMMD_ContextHandle context;
1017   
1018    ctx->sg = false;
1019
1020    for (
1021        dmaLength=0,
1022        sg = false,
1023        i=numBlocks,
1024        nextDescOffset=ctx->firstDescOffset>>BCHP_MEM_DMA_DESC_WORD3_NEXT_DESC_ADDR_SHIFT,
1025        descAddr=ctx->firstDesc;
1026        ;
1027        pSettings++
1028        ) {
1029
1030        if (pSettings->size>BMMD_P_MAX_TRANSFER_SIZE) {
1031            return BERR_TRACE(BERR_INVALID_PARAMETER);
1032        }
1033
1034#if BMMD_P_DESC_PROTECT
1035        /* check if queued descriptor reads from / writes to memory locations where BMMD contexts' descriptors are located */
1036        for (context=BLST_S_FIRST(&ctx->parent->ctxList); context; context=BLST_S_NEXT(context, ctxNode)) {
1037            uint32_t beg = context->firstDescOffset; /* inclusive */
1038            uint32_t end = context->firstDescOffset + BMMD_P_DESC_SIZE*(context->settings.maxNumBlocks+1); /* not inclusive. don't use lastDescOffset because a) it's not populated yet, b) it's dependent on how many descs you queue */
1039
1040            if (pSettings->src >= beg && pSettings->src < end) {
1041                BDBG_WRN(("%#lx: src %#lx reads from BMMD private descriptor memory location", ctx->firstDescOffset, pSettings->src));
1042                /* it's a warning, but continue */
1043            }
1044
1045            if (pSettings->dst >= beg && pSettings->dst < end) {
1046                BDBG_ERR(("%#lx: dst %#lx writes to BMMD private descriptor memory location", ctx->firstDescOffset, pSettings->dst));
1047                return BERR_TRACE(BERR_INVALID_PARAMETER);
1048            }
1049        }
1050        /* do a similar check for the QMD const location */
1051        if (pSettings->dst >= ctx->parent->qmdConstOffset && pSettings->dst < ctx->parent->qmdConstOffset+4) {
1052            BDBG_ERR(("%#lx: dst %#lx writes to BMMD private const memory location", ctx->firstDescOffset, pSettings->dst));
1053            return BERR_TRACE(BERR_INVALID_PARAMETER);
1054        }
1055#endif
1056
1057        if (ctx->settings.memoryBounds.offset && ctx->settings.memoryBounds.size) {
1058            uint32_t b_offset = ctx->settings.memoryBounds.offset;
1059            unsigned b_size = ctx->settings.memoryBounds.size;
1060           
1061            if (pSettings->src < b_offset || (pSettings->src+pSettings->size > b_offset+b_size)) {
1062                BDBG_ERR(("%#lx: src violates memory bounds %#lx:%#x %#lx:%#x", ctx->firstDescOffset,
1063                    pSettings->src, pSettings->size, b_offset, b_size));
1064                return BERR_INVALID_PARAMETER;
1065            }
1066           
1067            if (pSettings->dst < b_offset || (pSettings->dst+pSettings->size > b_offset+b_size)) {
1068                BDBG_ERR(("%#lx: dst violates memory bounds %#lx:%#x %#lx:%#x", ctx->firstDescOffset,
1069                    pSettings->dst, pSettings->size, b_offset, b_size));
1070                return BERR_INVALID_PARAMETER;
1071            }
1072        }
1073       
1074        dmaLength += pSettings->size;
1075        nextDescOffset+=(BMMD_P_DESC_SIZE>>BCHP_MEM_DMA_DESC_WORD3_NEXT_DESC_ADDR_SHIFT);
1076       
1077        descAddr[0] = BCHP_FIELD_DATA(MEM_DMA_DESC_WORD0, READ_ADDR, pSettings->src);
1078        descAddr[1] = BCHP_FIELD_DATA(MEM_DMA_DESC_WORD1, WRITE_ADDR, pSettings->dst);
1079        descAddr[2] = 
1080            BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, TRANSFER_SIZE, pSettings->size) |
1081            BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, INTR_ENABLE, false) |
1082            BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, LAST, false);
1083        descAddr[3] = ctx->dmaDesc_3 | BCHP_FIELD_DATA( MEM_DMA_DESC_WORD3, NEXT_DESC_ADDR, nextDescOffset);
1084        descAddr[4] = ctx->dmaDesc_4;
1085
1086        if (engineType==BMMD_EngineType_eDma && ctx->settings.scramMode>BMMD_ScramMode_eNone) {
1087            data = ctx->dmaDesc_4;
1088            if (pSettings->resetCrypto) {
1089                data |= BCHP_FIELD_DATA(MEM_DMA_DESC_WORD4, ENC_DEC_INIT, 1);
1090            }
1091            if (pSettings->sgScramStart) {
1092                data |= BCHP_FIELD_DATA(MEM_DMA_DESC_WORD4, SG_SCRAM_START, 1);
1093                sg = true;
1094                ctx->sg = true;
1095            }
1096            if (sg) { 
1097                /* SG_ENABLE must only be 1 between START and END.
 1098                   violating this not only causes HW hangs (on 65nm platforms), it also incurs a performance penalty.
 1099                   further, with SG, the CUR_DESC can no longer be reliably used as a status register. however, in our
1100                   current usage of the HW, this doesn't matter; we will only start running into problems if
1101                   INTR_ENABLE is true in the middle of a SG crypto chain */
1102                data |= BCHP_FIELD_DATA(MEM_DMA_DESC_WORD4, SG_ENABLE, 1);
1103            }
1104            if (pSettings->sgScramEnd) {
1105                data |= BCHP_FIELD_DATA(MEM_DMA_DESC_WORD4, SG_SCRAM_END, 1);
1106                sg = false;
1107#if (!BMMD_P_DMA_REV_2A)
1108                if (i!=1) {
1109                    BDBG_ERR(("SG crypto to clear transition within a single chain is not supported on this platform"));
1110                    return BERR_INVALID_PARAMETER;
1111                }
1112#endif
1113            }
1114            descAddr[4] = data;
1115        }
1116        else if (engineType==BMMD_EngineType_eSharf && ctx->settings.sharf.mode>BMMD_SharfMode_ePassThrough) {
1117            data = ctx->dmaDesc_4;
1118            data |= BCHP_FIELD_DATA(SHARF_MEM_DMA_DESC_WORD4, KEY_PRESENT,    (unsigned)pSettings->sharf.keyPresent);
1119            data |= BCHP_FIELD_DATA(SHARF_MEM_DMA_DESC_WORD4, DIGEST_PRESENT, (unsigned)pSettings->sharf.digestPresent);
1120            if (pSettings->sgScramStart) {
1121                data |= BCHP_FIELD_DATA(SHARF_MEM_DMA_DESC_WORD4, SG_SCRAM_START, 1);
1122                sg = true;
1123                ctx->sg = true;
1124            }
1125            if (sg) { 
1126                data |= BCHP_FIELD_DATA(SHARF_MEM_DMA_DESC_WORD4, SG_ENABLE, 1);
1127            }
1128            if (pSettings->sgScramEnd) {
1129                data |= BCHP_FIELD_DATA(SHARF_MEM_DMA_DESC_WORD4, SG_SCRAM_END, 1);
1130                sg = false;
1131#if (!BMMD_P_DMA_REV_2A)
1132                if (i!=1) {
1133                    BDBG_ERR(("SG crypto to clear transition within a single chain is not supported on this platform"));
1134                    return BERR_INVALID_PARAMETER;
1135                }
1136#endif
1137            }
1138            descAddr[4] = data;
1139        }
1140       
1141        descAddr[5] = 0; /* QMD destination must be cleared */
1142
1143        descAddr+=BMMD_P_DESC_SIZE/sizeof(uint32_t);
1144        i--;
 1145        if (i==0) { /* i is unsigned, so test for exact zero; this fires after the last block */
1146            /* save length of DMA transaction */
1147            ctx->dmaLength = dmaLength;
1148            /* save address of nextDescOffset for further use in range check */
1149            ctx->lastDescOffset = (nextDescOffset<<BCHP_MEM_DMA_DESC_WORD3_NEXT_DESC_ADDR_SHIFT);
1150            /* save address of lastDesc for further use in linking */
1151            ctx->lastDesc = descAddr;
1152
1153            /* if non-40nm platform AND sgScram descriptor, then use empty descriptor at end. otherwise, use QMD */
1154#if (!BMMD_P_DMA_REV_2A)
1155            if (ctx->sg) {
1156                data = ctx->dmaDesc_3;
1157                /* create an empty descriptor at the end of transaction */
 1158                descAddr[0] = BCHP_FIELD_DATA(MEM_DMA_DESC_WORD0, READ_ADDR, nextDescOffset);
 1159                descAddr[1] = BCHP_FIELD_DATA(MEM_DMA_DESC_WORD1, WRITE_ADDR, nextDescOffset);
1160                descAddr[2] = 
1161                    BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, TRANSFER_SIZE, 0) |
1162                    BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, INTR_ENABLE, true) |
1163                    BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, LAST, true);
1164                descAddr[3] = data | BCHP_FIELD_DATA( MEM_DMA_DESC_WORD3, NEXT_DESC_ADDR, nextDescOffset);
1165                descAddr[4] = 0;
1166                break;
1167            }
1168#endif
1169
1170            /* QMD */
1171            descAddr[0] = BCHP_FIELD_DATA(MEM_DMA_DESC_WORD0, READ_ADDR, ctx->parent->qmdConstOffset);
1172            descAddr[1] = BCHP_FIELD_DATA(MEM_DMA_DESC_WORD1, WRITE_ADDR, ctx->lastDescOffset-BMMD_P_DESC_SIZE+(4*5)); /* DESC_WORD5 of last descriptor */
1173            descAddr[2] = 
1174                BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, TRANSFER_SIZE, 4) |
1175                BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, INTR_ENABLE, true) |
1176                BCHP_FIELD_DATA(MEM_DMA_DESC_WORD2, LAST, true);
1177            descAddr[3] = BCHP_FIELD_DATA(MEM_DMA_DESC_WORD3, NEXT_DESC_ADDR, nextDescOffset); /* WRITE_ENDIAN_XLATE mode has to be fixed. READ_ENDIAN_MODE does not matter */
1178            descAddr[4] = 0; /* not sgScram */
1179            descAddr[5] = 0;
1180            break;
1181        }
1182    }
1183    return BERR_SUCCESS;
1184}
1185
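/* after BMMD_Context_P_PrepareBlocks(), the context's chain consists of numBlocks data
   descriptors followed by one trailing descriptor: normally a QMD that copies
   BMMD_P_COMPLETION_CODE into word 5 of the last data descriptor (which
   BMMD_Context_P_IsComplete() checks), or, on non-40nm platforms with an sgScram chain,
   an empty terminating descriptor whose completion is detected by comparing CUR_DESC
   against lastDescOffset instead */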
1186
1187/* this is the only function that triggers BLST_SQ_INSERT_TAIL */
1188BERR_Code BMMD_Context_Enqueue(
1189    BMMD_ContextHandle hCtx,
1190    const BMMD_ContextBlockSettings *pSettings,
1191    unsigned numBlocks
1192    )
1193{
1194    BERR_Code rc;
1195    BMMD_Handle mmd;
1196    uint32_t data;
1197
1198    if (hCtx->parent->standby) {
1199        return BERR_TRACE(BERR_NOT_SUPPORTED);
1200    }
1201
1202    if (numBlocks==0 || numBlocks > hCtx->settings.maxNumBlocks ) {
1203        BDBG_ERR(("numBlocks %u out of range - max=%u", numBlocks, hCtx->settings.maxNumBlocks));
1204        return BERR_TRACE(BERR_INVALID_PARAMETER);
1205    }
1206
1207    /* a single context can only be enqueued once */
1208    if (hCtx->state != BMMD_Context_P_State_eIdle) {
1209        return BERR_TRACE(BERR_NOT_SUPPORTED);
1210    }
1211    if (pSettings==NULL) {
1212        return BERR_TRACE(BERR_INVALID_PARAMETER);
1213    }
1214   
1215    rc = BMMD_Context_P_PrepareBlocks(hCtx, pSettings, numBlocks);
1216    if (rc!=BERR_SUCCESS) {
1217        return BERR_TRACE(BERR_INVALID_PARAMETER);
1218    }
1219
1220#if 0 /* for debug */
1221    BMMD_Context_P_DescDump(hCtx, numBlocks);
1222#endif
1223
1224    mmd = hCtx->parent;
1225    BKNI_EnterCriticalSection();
1226    (void)BMMD_Context_P_Start(mmd, hCtx);
1227
1228    hCtx->state = BMMD_Context_P_State_eQueued;
1229    data = BREG_Read32(mmd->reg, mmd->baseRegOffset+BCHP_MEM_DMA_0_STATUS);
1230    switch (BCHP_GET_FIELD_DATA(data, MEM_DMA_0_STATUS, DMA_STATUS)) {
1231        case BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Busy:
1232#if (!BMMD_P_DMA_REV_2A)
1233            if (hCtx->sg) {
1234                mmd->numSgCtx++;
1235            }
1236#endif
1237            BLST_SQ_INSERT_TAIL(&mmd->activeCtxList, hCtx, activeNode);
1238            break;
1239        case BCHP_MEM_DMA_0_STATUS_DMA_STATUS_Sleep:
1240            if (BMMD_Context_P_IsComplete(hCtx)) {
1241                BDBG_MSG_TRACE(("%#lx slow/fast complete", hCtx->firstDescOffset));
1242                hCtx->state = BMMD_Context_P_State_eIdle; /* don't even need to add into the list */
1243                BKNI_LeaveCriticalSection();
1244                goto completed;
1245            }
1246            /* keep going */
1247        default:
1248            /* add into the list and run normal completion sequence */
1249            BDBG_MSG_TRACE(("%#lx fastcheck", hCtx->firstDescOffset));
1250#if BMMD_DEBUG_MODE
1251#if BMMD_P_DMA_REV_2A
1252            BKNI_Fail();
1253#endif
1254#endif
1255#if (!BMMD_P_DMA_REV_2A)
1256            if (hCtx->sg) {
1257                mmd->numSgCtx++;
1258            }
1259#endif
1260            BLST_SQ_INSERT_TAIL(&mmd->activeCtxList, hCtx, activeNode);
1261            BMMD_P_CheckAll_isr(mmd);
1262            break;
1263    }
1264    BKNI_LeaveCriticalSection();
1265    return BMMD_QUEUED;
1266
1267completed:
1268    return BERR_SUCCESS;
1269}
1270
1271BERR_Code BMMD_Context_GetStatus(
1272    BMMD_ContextHandle hCtx,
1273    BMMD_ContextStatus *pStatus
1274    )
1275{
1276    BDBG_OBJECT_ASSERT(hCtx, BMMD_Context);
1277    BDBG_ASSERT(NULL != pStatus);
1278
1279    if (hCtx->state == BMMD_Context_P_State_eDestroyed) {
1280        return BERR_NOT_SUPPORTED;
1281    }
1282
1283    if (hCtx->state != BMMD_Context_P_State_eIdle) {
1284        BKNI_EnterCriticalSection();
1285        BDBG_MSG_TRACE(("UpdateStatus"));
1286        BMMD_P_UpdateStatus_isr(hCtx->parent, false);
1287        BKNI_LeaveCriticalSection();
1288    }
1289    if (hCtx->state == BMMD_Context_P_State_eIdle) {
1290        pStatus->state = BMMD_ContextState_eIdle;
1291    } 
1292    else {
1293        pStatus->state = BMMD_ContextState_eInProgress;
1294    }
1295    return BERR_SUCCESS;
1296}
1297
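#if 0
/* illustrative usage sketch (not compiled in): a single memory-to-memory copy through this PI.
   the surrounding handles (hChp/hReg/hMem/hInt) and the device offsets srcOffset/dstOffset/size
   are assumptions supplied by the caller; error handling is abbreviated. */
static BERR_Code BMMD_P_ExampleCopy(BCHP_Handle hChp, BREG_Handle hReg, BMEM_Handle hMem,
                                    BINT_Handle hInt, uint32_t srcOffset, uint32_t dstOffset,
                                    unsigned size)
{
    BMMD_Settings mmdSettings;
    BMMD_Handle mmd;
    BMMD_ContextSettings ctxSettings;
    BMMD_ContextHandle ctx;
    BMMD_ContextBlockSettings block;
    BMMD_ContextStatus status;
    BERR_Code rc;

    BMMD_GetDefaultSettings(&mmdSettings);
    mmdSettings.engineType = BMMD_EngineType_eDma; /* MEM_DMA engine 0 */
    mmdSettings.engineIndex = 0;
    rc = BMMD_Open(hChp, hReg, hMem, hInt, &mmdSettings, &mmd);
    if (rc != BERR_SUCCESS) { return BERR_TRACE(rc); }

    BMMD_Context_GetDefaultSettings(&ctxSettings);
    ctxSettings.maxNumBlocks = 1; /* one data descriptor (plus the internally-added QMD) */
    ctx = BMMD_Context_Create(mmd, &ctxSettings);
    if (ctx == NULL) { BMMD_Close(mmd); return BERR_TRACE(BERR_OUT_OF_SYSTEM_MEMORY); }

    BMMD_Context_GetDefaultBlockSettings(&block);
    block.src = srcOffset; /* physical/device offsets, not virtual addresses */
    block.dst = dstOffset;
    block.size = size;

    rc = BMMD_Context_Enqueue(ctx, &block, 1);
    if (rc == BMMD_QUEUED) {
        /* poll until idle; a callback_isr in ctxSettings could be used instead */
        do {
            BKNI_Sleep(1);
            rc = BMMD_Context_GetStatus(ctx, &status);
        } while (rc == BERR_SUCCESS && status.state == BMMD_ContextState_eInProgress);
    }

    BMMD_Context_Destroy(ctx);
    BMMD_Close(mmd);
    return rc;
}
#endif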
1298void BMMD_Context_P_Destroy(
1299    BMMD_ContextHandle hCtx
1300    )
1301{
1302    if (hCtx->parent->lastQueuedCtx == hCtx) { /* BMMD_Context_P_Start() may try to use lastQueuedCtx, so clear it */
1303        hCtx->parent->lastQueuedCtx = NULL;
1304    }
1305    BDBG_MSG_TRACE(("%#lx destroy", hCtx));
1306    BLST_S_REMOVE(&(hCtx->parent->ctxList), hCtx, BMMD_Context, ctxNode);
1307    BMEM_Free(hCtx->parent->mem, hCtx->firstDesc);
1308#ifdef BCHP_PWR_RESOURCE_DMA
1309    BCHP_PWR_ReleaseResource(hCtx->parent->chp, BCHP_PWR_RESOURCE_DMA);
1310#endif
1311    BDBG_OBJECT_DESTROY(hCtx, BMMD_Context);
1312    BKNI_Free(hCtx);
1313}
1314
1315void BMMD_Context_Destroy(
1316    BMMD_ContextHandle hCtx
1317    )
1318{
1319    BMMD_ContextStatus status;
1320    BMMD_Context_GetStatus(hCtx, &status);
1321   
1322    /* if not idle, then mark it as destroyed, but defer the actual destroy to BMMD_Close() */
1323    if (hCtx->state!=BMMD_Context_P_State_eIdle) {
1324        BDBG_ERR(("BMMD_Context_Destroy: %#lx is busy, leaking context", (unsigned long)hCtx));
1325        /* when marked as destroyed, as long as the context's descriptor memory is alive,
1326           BMMD can figure out the completion status and remove it from activeCtxList, if needed */
1327        hCtx->state = BMMD_Context_P_State_eDestroyed;
1328    }
1329    else {
1330        /* otherwise, really destroy */
1331        BMMD_Context_P_Destroy(hCtx);
1332    }
1333}
1334
1335void BMMD_Close(
1336    BMMD_Handle hMmd
1337    )
1338{
1339    BMMD_ContextHandle ctx;
1340    BDBG_OBJECT_ASSERT(hMmd, BMMD_Handle_Tag);
1341
1342    /* wait for pending operations to finish */
1343    if (BLST_SQ_FIRST(&hMmd->activeCtxList)) { /* Context_Destroy() never actually destroys any contexts that are part of activeCtxList */
1344        BDBG_WRN(("MMD still busy. Waiting before forced stop"));
1345        BKNI_Sleep(100);
1346
1347        /* stop HW. powering down the DMA clock while the HW is running can lead to an unrecoverable HW state  */
1348        BREG_Write32(hMmd->reg, hMmd->baseRegOffset+BCHP_MEM_DMA_0_CTRL, BCHP_FIELD_DATA(MEM_DMA_0_CTRL, RUN, false));
1349    }
1350
1351    BINT_DestroyCallback(hMmd->irq);
1352
1353    /* all pending operations are done. shutdown */
1354    while ((ctx = BLST_S_FIRST(&hMmd->ctxList))) {
1355        BDBG_WRN(("BMMD_Close: stale context %#lx (%s)", (unsigned long)ctx, ctx->state==BMMD_Context_P_State_eIdle?"idle":"active"));
1356        BMMD_Context_P_Destroy(ctx);
1357    }
1358
1359#if BMMD_MDEBUG_MODE
1360    BMEM_Free(hMmd->mem, g_debugMem);
1361#endif
1362    BMEM_Free(hMmd->mem, hMmd->qmdConst);
1363    BDBG_OBJECT_UNSET(hMmd, BMMD_Handle_Tag);
1364    BKNI_Free(hMmd);
1365}
1366
1367BERR_Code BMMD_Standby(
1368    BMMD_Handle hMmd,
1369    const BMMD_StandbySettings *pSettings
1370    )
1371{   
1372    BERR_Code rc;
1373    BSTD_UNUSED(pSettings);
1374    BDBG_MSG(("BMMD_Standby: %s %u", hMmd->settings.engineType==BMMD_EngineType_eDma?"DMA":"SHARF", hMmd->settings.engineIndex));
1375   
1376    if (hMmd->standby) {
1377        /* handle it silently */
1378        BDBG_MSG(("Already in standby"));
1379        return BERR_INVALID_PARAMETER;
1380    }
1381    /* cannot enter standby while mmd is busy */
1382    if (BLST_SQ_FIRST(&hMmd->activeCtxList)) {
1383        return BERR_TRACE(BERR_NOT_SUPPORTED);
1384    }
1385
1386    rc = BINT_DisableCallback_isr(hMmd->irq);
1387    if (rc != BERR_SUCCESS) { rc = BERR_TRACE(rc); /* keep going */ }
1388
1389#ifdef BCHP_PWR_RESOURCE_DMA
1390{
1391    BMMD_ContextHandle context;
1392    for (context=BLST_S_FIRST(&hMmd->ctxList); context; context=BLST_S_NEXT(context, ctxNode)) {
1393        /* DMA must be shutdown */
1394        BCHP_PWR_ReleaseResource(hMmd->chp, BCHP_PWR_RESOURCE_DMA);
1395    }
1396}
1397#endif
1398    hMmd->standby = true;
1399    /* no state to remember / restore */
1400    return BERR_SUCCESS;
1401}
1402
1403void BMMD_Resume(
1404    BMMD_Handle hMmd
1405    )
1406{
1407    BERR_Code rc;
1408    BDBG_MSG(("BMMD_Resume: %s %u", hMmd->settings.engineType==BMMD_EngineType_eDma?"DMA":"SHARF", hMmd->settings.engineIndex));
1409    if (!hMmd->standby) {
1410        /* handle it silently */
1411        BDBG_MSG(("Not in standby"));
1412        return;
1413    }
1414
1415#ifdef BCHP_PWR_RESOURCE_DMA
1416{
1417    BMMD_ContextHandle context;
1418    for (context=BLST_S_FIRST(&hMmd->ctxList); context; context=BLST_S_NEXT(context, ctxNode)) {
1419        BCHP_PWR_AcquireResource(hMmd->chp, BCHP_PWR_RESOURCE_DMA);
1420    }
1421}
1422#endif
1423
1424    /* replicate the register writes done in BMMD_Open */
1425#ifdef BCHP_PWR_RESOURCE_DMA
1426    /* must acquire again in case there were no contexts open */
1427    BCHP_PWR_AcquireResource(hMmd->chp, BCHP_PWR_RESOURCE_DMA);
1428#endif
1429    BREG_Write32(hMmd->reg, hMmd->baseRegOffset+BCHP_MEM_DMA_0_CTRL, 
1430        BCHP_FIELD_DATA(MEM_DMA_0_CTRL, RUN, false)); /* stop DMA on open */
1431    BREG_Write32(hMmd->reg, hMmd->baseRegOffset+BCHP_MEM_DMA_0_FIRST_DESC,
1432        BCHP_FIELD_DATA(MEM_DMA_0_FIRST_DESC, ADDR, 0));
1433    BREG_Write32(hMmd->reg, hMmd->baseRegOffset+BCHP_MEM_DMA_0_WAKE_CTRL,
1434        BCHP_FIELD_DATA(MEM_DMA_0_WAKE_CTRL, WAKE_MODE, 1)); /* always use wake_from_last */
1435#if BMMD_P_HAS_LMC_CORE
1436    BREG_Write32(hMmd->reg, BCHP_SHARF_LMC_CORE_SWITCH_MEMC, 
1437        BCHP_FIELD_DATA(SHARF_LMC_CORE_SWITCH_MEMC, MUX_SELECT, 1)); /* always point to MEMC, not LMC */
1438#endif
1439
1440    rc = BINT_EnableCallback_isr(hMmd->irq);
1441    if (rc != BERR_SUCCESS) { rc = BERR_TRACE(rc); /* keep going */ }
1442
1443#ifdef BCHP_PWR_RESOURCE_DMA
1444    BCHP_PWR_ReleaseResource(hMmd->chp, BCHP_PWR_RESOURCE_DMA);
1445#endif
1446
1447    hMmd->standby = false;
1448    return;
1449}
1450
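/* illustrative standby/resume sketch (assumptions: hMmd is already open and no contexts are
   queued, otherwise BMMD_Standby() returns BERR_NOT_SUPPORTED; the standby settings are
   currently unused, so an uninitialized structure is acceptable):
       BMMD_StandbySettings standbySettings;
       rc = BMMD_Standby(hMmd, &standbySettings);
       ...
       BMMD_Resume(hMmd);
*/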
1451#if BMMD_MDEBUG_MODE
1452void BMMD_MDEBUG_Flush()
1453{
1454    BKNI_Memset(g_debugMem, 0, BMMD_MDEBUG_MEM_ENTRY_SIZE*BMMD_MDEBUG_MEM_ENTRIES);
1455    g_current = 0;
1456}
1457#endif
1458
1459void BMMD_Context_P_DescDump(
1460    BMMD_ContextHandle ctx,
1461    unsigned numBlocks
1462    )
1463{
1464    unsigned i;
1465    uint32_t *descAddr, descOffset;
1466
1467    for (i=0,descAddr=ctx->firstDesc; i<=numBlocks; i++,descAddr+=BMMD_P_DESC_SIZE/sizeof(uint32_t)) {
1468        descOffset = ctx->firstDescOffset+BMMD_P_DESC_SIZE*i;
1469        BKNI_Printf("desc[%d:%08x] = %08x, %08x, %08x, %08x, %08x\n", 
1470            i, descOffset,
1471            descAddr[0], descAddr[1], descAddr[2], descAddr[3], descAddr[4]);
1472    }
1473    BKNI_Printf("\n");
1474}
1475