Import Annapurna Labs Alpine HAL for networking

Files required for the NIC driver

Import from vendor-sys/alpine-hal/2.7
SVN rev.: 294828
HAL version: 2.7

Obtained from:  Semihalf
Sponsored by:   Annapurna Labs
This commit is contained in:
Zbigniew Bodek 2016-01-26 15:22:04 +00:00
parent c7c96d1093
commit 49b49cda41
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=294838
26 changed files with 28017 additions and 0 deletions

View file

@ -0,0 +1,291 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @{
* @file al_hal_iofic.c
*
* @brief interrupt controller hal
*
*/
#include "al_hal_iofic.h"
#include "al_hal_iofic_regs.h"
/*
 * Configure the interrupt control register of a group.
 * Interrupts are kept masked by this call; use al_iofic_unmask() to enable.
 */
int al_iofic_config(void __iomem *regs_base, int group, uint32_t flags)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	al_reg_write32(&iofic->ctrl[group].int_control_grp, flags);

	return 0;
}
/*
 * Configure the moderation timer resolution for a given group.
 */
int al_iofic_moder_res_config(void __iomem *regs_base, int group,
	uint8_t resolution)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;
	uint32_t ctrl;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	/* Read-modify-write: touch only the resolution field. */
	ctrl = al_reg_read32(&iofic->ctrl[group].int_control_grp);
	AL_REG_FIELD_SET(ctrl,
			 INT_CONTROL_GRP_MOD_RES_MASK,
			 INT_CONTROL_GRP_MOD_RES_SHIFT,
			 resolution);
	al_reg_write32(&iofic->ctrl[group].int_control_grp, ctrl);

	return 0;
}
/*
 * Configure the moderation timer interval for a given legacy interrupt group.
 */
int al_iofic_legacy_moder_interval_config(void __iomem *regs_base, int group,
	uint8_t interval)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;
	uint32_t ctrl;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	/* Read-modify-write: touch only the interval field. */
	ctrl = al_reg_read32(&iofic->ctrl[group].int_control_grp);
	AL_REG_FIELD_SET(ctrl,
			 INT_CONTROL_GRP_MOD_INTV_MASK,
			 INT_CONTROL_GRP_MOD_INTV_SHIFT,
			 interval);
	al_reg_write32(&iofic->ctrl[group].int_control_grp, ctrl);

	return 0;
}
/*
 * Configure the moderation timer interval for a given MSI-X vector.
 *
 * NOTE(review): unlike 'group', 'vector' is not range-checked here -- the
 * caller is presumably expected to pass a valid vector index; confirm.
 */
int al_iofic_msix_moder_interval_config(void __iomem *regs_base, int group,
	uint8_t vector, uint8_t interval)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;
	uint32_t mod;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	/* Read-modify-write of the per-vector moderation register. */
	mod = al_reg_read32(&iofic->grp_int_mod[group][vector].grp_int_mod_reg);
	AL_REG_FIELD_SET(mod,
			 INT_MOD_INTV_MASK,
			 INT_MOD_INTV_SHIFT,
			 interval);
	al_reg_write32(&iofic->grp_int_mod[group][vector].grp_int_mod_reg, mod);

	return 0;
}
/*
 * Configure the VMID attributes for a given MSI-X vector.
 * The whole VMID register is rebuilt from scratch (no read-modify-write).
 */
int al_iofic_msix_vmid_attributes_config(void __iomem *regs_base, int group,
	uint8_t vector, uint32_t vmid, uint8_t vmid_en)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;
	uint32_t val = 0;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	AL_REG_FIELD_SET(val,
			 INT_MSIX_VMID_MASK,
			 INT_MSIX_VMID_SHIFT,
			 vmid);
	AL_REG_BIT_VAL_SET(val,
			   INT_MSIX_VMID_EN_SHIFT,
			   vmid_en);
	al_reg_write32(&iofic->grp_int_mod[group][vector].grp_int_vmid_reg, val);

	return 0;
}
/*
 * Return a pointer to the unmask (mask-clear) register of a given group.
 */
uint32_t __iomem * al_iofic_unmask_offset_get(void __iomem *regs_base, int group)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	return &iofic->ctrl[group].int_mask_clear_grp;
}
/*
 * Unmask specific interrupts for a given group.
 */
void al_iofic_unmask(void __iomem *regs_base, int group, uint32_t mask)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	/*
	 * The dedicated mask-clear register avoids reading the mask register:
	 * a 0 bit unmasks the interrupt, a 1 bit has no effect, hence ~mask.
	 */
	al_reg_write32_relaxed(&iofic->ctrl[group].int_mask_clear_grp, ~mask);
}
/*
 * Mask specific interrupts for a given group.
 */
void al_iofic_mask(void __iomem *regs_base, int group, uint32_t mask)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;
	uint32_t cur;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	/* OR the requested bits into the current mask. */
	cur = al_reg_read32(&iofic->ctrl[group].int_mask_grp);
	al_reg_write32(&iofic->ctrl[group].int_mask_grp, cur | mask);
}
/*
 * Read the current interrupt mask of a given group.
 */
uint32_t al_iofic_read_mask(void __iomem *regs_base, int group)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	return al_reg_read32(&iofic->ctrl[group].int_mask_grp);
}
/*
 * Read the interrupt cause register of a given group.
 */
uint32_t al_iofic_read_cause(void __iomem *regs_base, int group)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	return al_reg_read32(&iofic->ctrl[group].int_cause_grp);
}
/*
 * Clear selected bits in the interrupt cause register of a given group.
 */
void al_iofic_clear_cause(void __iomem *regs_base, int group, uint32_t mask)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	/* The register is write-0-to-clear (writing 1 has no effect). */
	al_reg_write32(&iofic->ctrl[group].int_cause_grp, ~mask);
}
/*
 * Set selected bits in the cause register of a given group
 * (software-triggered interrupts).
 */
void al_iofic_set_cause(void __iomem *regs_base, int group, uint32_t mask)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	al_reg_write32(&iofic->ctrl[group].int_cause_set_grp, mask);
}
/*
 * Unmask specific interrupts from aborting the UDMA of a given group.
 */
void al_iofic_abort_mask(void __iomem *regs_base, int group, uint32_t mask)
{
	struct al_iofic_regs __iomem *iofic =
		(struct al_iofic_regs __iomem *)regs_base;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	al_reg_write32(&iofic->ctrl[group].int_abort_msk_grp, mask);
}
/*
 * Trigger all interrupts that are waiting for their moderation timers to
 * expire, by setting the moderation-reset bit in the group control register.
 *
 * Fixes vs. original: removed a duplicated al_assert() pair and a dead
 * zero-initializer on 'reg' that was immediately overwritten by the read.
 */
void al_iofic_interrupt_moderation_reset(void __iomem *regs_base, int group)
{
	struct al_iofic_regs __iomem *regs =
		(struct al_iofic_regs __iomem *)regs_base;
	uint32_t reg;

	al_assert(regs_base);
	al_assert(group < AL_IOFIC_MAX_GROUPS);

	/* Read-modify-write so the other control bits are preserved. */
	reg = al_reg_read32(&regs->ctrl[group].int_control_grp);
	reg |= INT_CONTROL_GRP_MOD_RST;
	al_reg_write32(&regs->ctrl[group].int_control_grp, reg);
}
/** @} end of interrupt controller group */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,750 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#ifndef __AL_SERDES_INTERNAL_REGS_H__
#define __AL_SERDES_INTERNAL_REGS_H__
#ifdef __cplusplus
extern "C" {
#endif
/*******************************************************************************
* Per lane register fields
******************************************************************************/
/*
* RX and TX lane hard reset
* 0 - Hard reset is asserted
* 1 - Hard reset is de-asserted
*/
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_REG_NUM 2
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_MASK 0x01
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_ASSERT 0x00
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_DEASSERT 0x01
/*
* RX and TX lane hard reset control
* 0 - Hard reset is taken from the interface pins
* 1 - Hard reset is taken from registers
*/
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_REG_NUM 2
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_MASK 0x02
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_IFACE 0x00
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_REGS 0x02
/* RX lane power state control */
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_REG_NUM 3
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_MASK 0x1f
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_PD 0x01
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P2 0x02
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P1 0x04
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0S 0x08
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0 0x10
/* TX lane power state control */
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_REG_NUM 4
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_MASK 0x1f
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_PD 0x01
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P2 0x02
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P1 0x04
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0S 0x08
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0 0x10
/* RX lane word width */
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM 5
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_MASK 0x07
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_8 0x00
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_10 0x01
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_16 0x02
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_20 0x03
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_32 0x04
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_40 0x05
/* TX lane word width */
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_REG_NUM 5
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_MASK 0x70
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_8 0x00
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_10 0x10
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_16 0x20
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_20 0x30
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_32 0x40
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_40 0x50
/* RX lane rate select */
#define SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM 6
#define SERDES_IREG_FLD_PCSRX_DIVRATE_MASK 0x07
#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_8 0x00
#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_4 0x01
#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_2 0x02
#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1 0x03
/* TX lane rate select */
#define SERDES_IREG_FLD_PCSTX_DIVRATE_REG_NUM 6
#define SERDES_IREG_FLD_PCSTX_DIVRATE_MASK 0x70
#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_8 0x00
#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_4 0x10
#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_2 0x20
#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1 0x30
/*
* PMA serial RX-to-TX loop-back enable (from AGC to IO Driver). Serial receive
* to transmit loopback: 0 - Disables loopback 1 - Transmits the untimed,
* partial equalized RX signal out the transmit IO pins
*/
#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN 0x10
/*
* PMA TX-to-RX buffered serial loop-back enable (bypasses IO Driver). Serial
* transmit to receive buffered loopback: 0 - Disables loopback 1 - Loops back
* the TX serializer output into the CDR
*/
#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN 0x20
/*
* PMA TX-to-RX I/O serial loop-back enable (loop back done directly from TX to
* RX pads). Serial IO loopback from the transmit lane IO pins to the receive
* lane IO pins: 0 - Disables loopback 1 - Loops back the driver IO signal to
* the RX IO pins
*/
#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN 0x40
/*
* PMA Parallel RX-to-TX loop-back enable. Parallel loopback from the PMA
* receive lane 20-bit data ports, to the transmit lane 20-bit data ports 0 -
* Disables loopback 1 - Loops back the 20-bit receive data port to the
* transmitter
*/
#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN 0x80
/*
* PMA CDR recovered-clock loopback enable; asserted when PARRX2TXTIMEDEN is 1.
* Transmit bit clock select: 0 - Selects synthesizer bit clock for transmit 1
* - Selects CDR clock for transmit
*/
#define SERDES_IREG_FLD_LB_CDRCLK2TXEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_CDRCLK2TXEN 0x01
/* Receive lane BIST enable. Active High */
#define SERDES_IREG_FLD_PCSRXBIST_EN_REG_NUM 8
#define SERDES_IREG_FLD_PCSRXBIST_EN 0x01
/* TX lane BIST enable. Active High */
#define SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM 8
#define SERDES_IREG_FLD_PCSTXBIST_EN 0x02
/*
* RX BIST completion signal 0 - Indicates test is not completed 1 - Indicates
* the test has completed, and will remain high until a new test is initiated
*/
#define SERDES_IREG_FLD_RXBIST_DONE_REG_NUM 8
#define SERDES_IREG_FLD_RXBIST_DONE 0x04
/*
* RX BIST error count overflow indicator. Indicates an overflow in the number
* of byte errors identified during the course of the test. This word is stable
* to sample when *_DONE_* signal has asserted
*/
#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW_REG_NUM 8
#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW 0x08
/*
* RX BIST locked indicator 0 - Indicates BIST is not word locked and error
* comparisons have not begun yet 1 - Indicates BIST is word locked and error
* comparisons have begun
*/
#define SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM 8
#define SERDES_IREG_FLD_RXBIST_RXLOCKED 0x10
/*
* RX BIST error count word. Indicates the number of byte errors identified
* during the course of the test. This word is stable to sample when *_DONE_*
* signal has asserted
*/
#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_MSB_REG_NUM 9
#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_LSB_REG_NUM 10
/* Tx params */
#define SERDES_IREG_TX_DRV_1_REG_NUM 21
#define SERDES_IREG_TX_DRV_1_HLEV_MASK 0x7
#define SERDES_IREG_TX_DRV_1_HLEV_SHIFT 0
#define SERDES_IREG_TX_DRV_1_LEVN_MASK 0xf8
#define SERDES_IREG_TX_DRV_1_LEVN_SHIFT 3
#define SERDES_IREG_TX_DRV_2_REG_NUM 22
#define SERDES_IREG_TX_DRV_2_LEVNM1_MASK 0xf
#define SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT 0
#define SERDES_IREG_TX_DRV_2_LEVNM2_MASK 0x30
#define SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT 4
#define SERDES_IREG_TX_DRV_3_REG_NUM 23
#define SERDES_IREG_TX_DRV_3_LEVNP1_MASK 0x7
#define SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT 0
#define SERDES_IREG_TX_DRV_3_SLEW_MASK 0x18
#define SERDES_IREG_TX_DRV_3_SLEW_SHIFT 3
/* Rx params */
#define SERDES_IREG_RX_CALEQ_1_REG_NUM 24
#define SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK 0x7
#define SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT 0
/* DFE post-shaping tap 3dB frequency */
#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK 0x38
#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT 3
#define SERDES_IREG_RX_CALEQ_2_REG_NUM 25
/* DFE post-shaping tap gain */
#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK 0x7
#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT 0
/* DFE first tap gain control */
#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK 0x78
#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT 3
#define SERDES_IREG_RX_CALEQ_3_REG_NUM 26
#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK 0xf
#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT 0
#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK 0xf0
#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT 4
#define SERDES_IREG_RX_CALEQ_4_REG_NUM 27
#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK 0xf
#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT 0
#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK 0x70
#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT 4
#define SERDES_IREG_RX_CALEQ_5_REG_NUM 28
#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK 0x7
#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT 0
#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK 0xf8
#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT 3
/* RX lane best eye point measurement result */
#define SERDES_IREG_RXEQ_BEST_EYE_MSB_VAL_REG_NUM 29
#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_REG_NUM 30
#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_MASK 0x3F
/*
* Adaptive RX Equalization enable
* 0 - Disables adaptive RX equalization.
* 1 - Enables adaptive RX equalization.
*/
#define SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM 31
#define SERDES_IREG_FLD_PCSRXEQ_START (1 << 0)
/*
* Enables an eye diagram measurement
* within the PHY.
* 0 - Disables eye diagram measurement
* 1 - Enables eye diagram measurement
*/
#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM 31
#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START (1 << 1)
/*
* RX lane single roam eye point measurement start signal.
* If asserted, single measurement at fix XADJUST and YADJUST is started.
*/
#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM 31
#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START (1 << 2)
/*
* PHY Eye diagram measurement status
* signal
* 0 - Indicates eye diagram results are not
* valid for sampling
* 1 - Indicates eye diagram is complete and
* results are valid for sampling
*/
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM 32
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE (1 << 0)
/*
* Eye diagram error signal. Indicates if the
* measurement was invalid because the eye
* diagram was interrupted by the link entering
* electrical idle.
* 0 - Indicates eye diagram is valid
* 1- Indicates an error occurred, and the eye
* diagram measurement should be re-run
*/
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR_REG_NUM 32
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR (1 << 1)
/*
* PHY Adaptive Equalization status
* 0 - Indicates Adaptive Equalization results are not valid for sampling
* 1 - Indicates Adaptive Equalization is complete and results are valid for
* sampling
*/
#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM 32
#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE (1 << 2)
/*
*
* PHY Adaptive Equalization Status Signal
* 0 Indicates adaptive equalization results
* are not valid for sampling
* 1 Indicates adaptive equalization is
* complete and results are valid for sampling.
*/
#define SERDES_IREG_FLD_RXEQ_DONE_REG_NUM 32
#define SERDES_IREG_FLD_RXEQ_DONE (1 << 3)
/*
* 7-bit eye diagram time adjust control
* - 6-bits per UI
* - spans 2 UI
*/
#define SERDES_IREG_FLD_RXCALROAMXADJUST_REG_NUM 33
/* 6-bit eye diagram voltage adjust control - spans +/-300mVdiff */
#define SERDES_IREG_FLD_RXCALROAMYADJUST_REG_NUM 34
/*
 * Eye diagram status signal. Safe for
 * sampling when *DONE* signal has
 * asserted
 * 14'h0000 - Completely Closed Eye
 * 14'hFFFF - Completely Open Eye
 */
/*
 * NOTE(review): the "_MAKE" suffix in the two mask macros below is presumably
 * a typo for "_MASK"; kept as-is because renaming would break any existing
 * users of these public macro names -- confirm against callers before fixing.
 */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM 35
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_MAKE 0xFF
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_SHIFT 0
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM 36
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE 0x3F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_SHIFT 0
/*
* RX lane single roam eye point measurement result.
* If 0, eye is open at current XADJUST and YADJUST settings.
*/
#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_MSB_REG_NUM 37
#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_LSB_REG_NUM 38
/*
* Override enable for CDR lock to reference clock
* 0 - CDR is always locked to reference
* 1 - CDR operation mode (Lock2Reference or Lock2data are controlled internally
* depending on the incoming signal and ppm status)
*/
#define SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM 39
#define SERDES_IREG_FLD_RXLOCK2REF_OVREN (1 << 1)
/*
* Selects Eye to capture based on edge
* 0 - Capture 1st Eye in Eye Diagram
* 1 - Capture 2nd Eye in Eye Diagram measurement
*/
#define SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM 39
#define SERDES_IREG_FLD_RXROAM_XORBITSEL (1 << 2)
#define SERDES_IREG_FLD_RXROAM_XORBITSEL_1ST 0
#define SERDES_IREG_FLD_RXROAM_XORBITSEL_2ND (1 << 2)
/*
* RX Signal detect. 0 indicates no signal, 1 indicates signal detected.
*/
#define SERDES_IREG_FLD_RXRANDET_REG_NUM 41
#define SERDES_IREG_FLD_RXRANDET_STAT 0x20
/*
* RX data polarity inversion control:
* 1'b0: no inversion
* 1'b1: invert polarity
*/
#define SERDES_IREG_FLD_POLARITY_RX_REG_NUM 46
#define SERDES_IREG_FLD_POLARITY_RX_INV (1 << 0)
/*
* TX data polarity inversion control:
* 1'b0: no inversion
* 1'b1: invert polarity
*/
#define SERDES_IREG_FLD_POLARITY_TX_REG_NUM 46
#define SERDES_IREG_FLD_POLARITY_TX_INV (1 << 1)
/* LANEPCSPSTATE* override enable (Active low) */
#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN (1 << 0)
/* LB* override enable (Active low) */
#define SERDES_IREG_FLD_LB_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_LB_LOCWREN (1 << 1)
/* PCSRX* override enable (Active low) */
#define SERDES_IREG_FLD_PCSRX_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_PCSRX_LOCWREN (1 << 4)
/* PCSRXBIST* override enable (Active low) */
#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN (1 << 5)
/* PCSRXEQ* override enable (Active low) */
#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN (1 << 6)
/* PCSTX* override enable (Active low) */
#define SERDES_IREG_FLD_PCSTX_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_PCSTX_LOCWREN (1 << 7)
/*
* group registers:
* SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN,
* SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN
* SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN
*/
#define SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM 86
/* PCSTXBIST* override enable (Active low) */
#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN_REG_NUM 86
#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN (1 << 0)
/* Override RX_CALCEQ through the internal registers (Active low) */
#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM 86
#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN (1 << 3)
#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM 86
#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN (1 << 4)
/* RXCALROAMEYEMEASIN* override enable - Active Low */
#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM 86
#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN (1 << 6)
/* RXCALROAMXADJUST* override enable - Active Low */
#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM 86
#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN (1 << 7)
/* RXCALROAMYADJUST* override enable - Active Low */
#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN (1 << 0)
/* RXCDRCALFOSC* override enable. Active Low */
#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN (1 << 1)
/* Over-write enable for RXEYEDIAGFSM_INITXVAL */
#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN (1 << 2)
/* Over-write enable for CMNCLKGENMUXSEL_TXINTERNAL */
#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN (1 << 3)
/* TXCALTCLKDUTY* override enable. Active Low */
#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN (1 << 4)
/* Override TX_DRV through the internal registers (Active low) */
#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM 87
#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN (1 << 5)
/*******************************************************************************
* Common lane register fields - PMA
******************************************************************************/
/*
* Common lane hard reset control
* 0 - Hard reset is taken from the interface pins
* 1 - Hard reset is taken from registers
*/
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_REG_NUM 2
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_MASK 0x01
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_IFACE 0x00
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_REGS 0x01
/*
* Common lane hard reset
* 0 - Hard reset is asserted
* 1 - Hard reset is de-asserted
*/
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_REG_NUM 2
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_MASK 0x02
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_ASSERT 0x00
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_DEASSERT 0x02
/* Synth power state control */
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_REG_NUM 3
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_MASK 0x1f
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_PD 0x01
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P2 0x02
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P1 0x04
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0S 0x08
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0 0x10
/* Transmit datapath FIFO enable (Active High) */
#define SERDES_IREG_FLD_CMNPCS_TXENABLE_REG_NUM 8
#define SERDES_IREG_FLD_CMNPCS_TXENABLE (1 << 2)
/*
* RX lost of signal detector enable
* - 0 - disable
* - 1 - enable
*/
#define SERDES_IREG_FLD_RXLOSDET_ENABLE_REG_NUM 13
#define SERDES_IREG_FLD_RXLOSDET_ENABLE AL_BIT(4)
/* Signal Detect Threshold Level */
#define SERDES_IREG_FLD_RXELECIDLE_SIGDETTHRESH_REG_NUM 15
#define SERDES_IREG_FLD_RXELECIDLE_SIGDETTHRESH_MASK AL_FIELD_MASK(2, 0)
/* LOS Detect Threshold Level */
#define SERDES_IREG_FLD_RXLOSDET_THRESH_REG_NUM 15
#define SERDES_IREG_FLD_RXLOSDET_THRESH_MASK AL_FIELD_MASK(4, 3)
#define SERDES_IREG_FLD_RXLOSDET_THRESH_SHIFT 3
#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_REG_NUM 30
#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_MASK 0x7f
#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_REG_NUM 31
#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_MASK 0x7f
#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_REG_NUM 32
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_MASK 0xff
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_REG_NUM 33
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_MASK 0x1
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_REG_NUM 33
#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_MASK 0x3e
#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_SHIFT 1
#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_REG_NUM 34
#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_MASK 0xff
#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_REG_NUM 35
#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_MASK 0x1
#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_FINE_STEP_REG_NUM 35
#define SERDES_IREG_FLD_RXEQ_FINE_STEP_MASK 0x3e
#define SERDES_IREG_FLD_RXEQ_FINE_STEP_SHIFT 1
#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_REG_NUM 36
#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_MASK 0xff
#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_REG_NUM 37
#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_MASK 0x7
#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_REG_NUM 43
#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_MASK 0x7
#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_SHIFT 0
#define SERDES_IREG_FLD_TX_BIST_PAT_REG_NUM(byte_num) (56 + (byte_num))
#define SERDES_IREG_FLD_TX_BIST_PAT_NUM_BYTES 10
/*
* Selects the transmit BIST mode:
* 0 - Uses the 80-bit internal memory pattern (w/ OOB)
* 1 - Uses a 27 PRBS pattern
* 2 - Uses a 223 PRBS pattern
* 3 - Uses a 231 PRBS pattern
* 4 - Uses a 1010 clock pattern
* 5 and above - Reserved
*/
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM 80
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_MASK 0x07
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_USER 0x00
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS7 0x01
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS23 0x02
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS31 0x03
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_CLK1010 0x04
/* Single-Bit error injection enable (on posedge) */
#define SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM 80
#define SERDES_IREG_FLD_TXBIST_BITERROR_EN 0x20
/* CMNPCIEGEN3* override enable (Active Low) */
#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM 95
#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN (1 << 2)
/* CMNPCS* override enable (Active Low) */
#define SERDES_IREG_FLD_CMNPCS_LOCWREN_REG_NUM 95
#define SERDES_IREG_FLD_CMNPCS_LOCWREN (1 << 3)
/* CMNPCSBIST* override enable (Active Low) */
#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN_REG_NUM 95
#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN (1 << 4)
/* CMNPCSPSTATE* override enable (Active Low) */
#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN_REG_NUM 95
#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN (1 << 5)
/* PCS_EN* override enable (Active Low) */
#define SERDES_IREG_FLD_PCS_LOCWREN_REG_NUM 96
#define SERDES_IREG_FLD_PCS_LOCWREN (1 << 3)
/* Eye diagram sample count */
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM 150
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_MASK 0xff
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_SHIFT 0
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM 151
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_MASK 0xff
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_SHIFT 0
/*
 * RXLOCK2REF* override control enable.
 * Fix vs. original: the bit value was defined as a bare '1 << 0' without
 * parentheses, which mis-evaluates inside larger expressions due to operator
 * precedence (e.g. 'M * 2' would expand to '1 << 0 * 2' == 1). Parenthesized
 * to match every other bit macro in this file.
 */
#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM	230
#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN		(1 << 0)
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_REG_NUM 623
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_MASK 0xff
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_SHIFT 0
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_REG_NUM 624
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_MASK 0xff
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_SHIFT 0
/* X and Y coefficient return value */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM 626
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_MASK 0x0F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_SHIFT 0
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_MASK 0xF0
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_SHIFT 4
/* X coarse scan step */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM 627
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_MASK 0x7F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_SHIFT 0
/* X fine scan step */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM 628
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_MASK 0x7F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_SHIFT 0
/* Y coarse scan step */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM 629
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_MASK 0x0F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_SHIFT 0
/* Y fine scan step */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM 630
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_MASK 0x0F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_SHIFT 0
#define SERDES_IREG_FLD_PPMDRIFTCOUNT1_REG_NUM 157
#define SERDES_IREG_FLD_PPMDRIFTCOUNT2_REG_NUM 158
#define SERDES_IREG_FLD_PPMDRIFTMAX1_REG_NUM 159
#define SERDES_IREG_FLD_PPMDRIFTMAX2_REG_NUM 160
#define SERDES_IREG_FLD_SYNTHPPMDRIFTMAX1_REG_NUM 163
#define SERDES_IREG_FLD_SYNTHPPMDRIFTMAX2_REG_NUM 164
/*******************************************************************************
* Common lane register fields - PCS
******************************************************************************/
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_REG_NUM 3
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_MASK AL_FIELD_MASK(5, 4)
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_SHIFT 4
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA_REG_NUM 6
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA AL_BIT(2)
#define SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_NUM 18
#define SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_MASK 0x1F
#define SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_SHIFT 0
#define SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_NUM 19
#define SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_MASK 0x7C
#define SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_SHIFT 2
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_NUM 20
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_MASK 0x1F
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_SHIFT 0
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_NUM 21
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_MASK 0x7C
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_SHIFT 2
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_ITER_NUM_REG_NUM 22
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_ITER_NUM_MASK 0x7f
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_ITER_NUM_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_ITER_NUM_REG_NUM 34
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_ITER_NUM_MASK 0x7f
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_ITER_NUM_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN1_MASK_REG_NUM 23
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN1_MASK_MASK 0xff
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN1_MASK_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN2_MASK_REG_NUM 22
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN2_MASK_MASK 0x80
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN2_MASK_SHIFT 7
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_STEP_REG_NUM 24
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_STEP_MASK 0x3e
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_STEP_SHIFT 1
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN1_MASK_REG_NUM 35
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN1_MASK_MASK 0xff
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN1_MASK_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN2_MASK_REG_NUM 34
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN2_MASK_MASK 0x80
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN2_MASK_SHIFT 7
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_STEP_REG_NUM 36
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_STEP_MASK 0x1f
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_STEP_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_CODE_EN_REG_NUM 37
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_CODE_EN_MASK 0xff
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_CODE_EN_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_LASTCODE_REG_NUM 36
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_LASTCODE_MASK 0xe0
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_LASTCODE_SHIFT 5
#ifdef __cplusplus
}
#endif
#endif /* __AL_serdes_REG_H */

View file

@ -0,0 +1,495 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @{
* @file al_hal_serdes_regs.h
*
* @brief ... registers
*
*/
#ifndef __AL_HAL_SERDES_REGS_H__
#define __AL_HAL_SERDES_REGS_H__
#include "al_hal_plat_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Unit Registers
*/
/*
 * Per-unit (common) SerDes registers.
 * Bracketed offsets are relative to the start of this structure; the
 * rsrvd members pad the layout to those offsets. This mirrors hardware -
 * do not reorder or resize members.
 */
struct serdes_gen {
	/* [0x0] SerDes Registers Version */
	uint32_t version;
	uint32_t rsrvd_0[3];
	/* [0x10] SerDes register file address */
	uint32_t reg_addr;
	/* [0x14] SerDes register file data */
	uint32_t reg_data;
	uint32_t rsrvd_1[2];
	/* [0x20] SerDes control */
	uint32_t ictl_multi_bist;
	/* [0x24] SerDes control */
	uint32_t ictl_pcs;
	/* [0x28] SerDes control */
	uint32_t ictl_pma;
	uint32_t rsrvd_2;
	/* [0x30] SerDes control */
	uint32_t ipd_multi_synth;
	/* [0x34] SerDes control */
	uint32_t irst;
	/* [0x38] SerDes control */
	uint32_t octl_multi_synthready;
	/* [0x3c] SerDes control */
	uint32_t octl_multi_synthstatus;
	/* [0x40] SerDes control */
	uint32_t clk_out;
	uint32_t rsrvd[47];
};
/*
 * Per-lane SerDes registers.
 * Bracketed offsets are relative to the start of each lane block.
 * This mirrors hardware - do not reorder or resize members.
 */
struct serdes_lane {
	uint32_t rsrvd1[4];
	/* [0x10] SerDes status */
	uint32_t octl_pma;
	/* [0x14] SerDes control */
	uint32_t ictl_multi_andme;
	/* [0x18] SerDes control */
	uint32_t ictl_multi_lb;
	/* [0x1c] SerDes control */
	uint32_t ictl_multi_rxbist;
	/* [0x20] SerDes control */
	uint32_t ictl_multi_txbist;
	/* [0x24] SerDes control */
	uint32_t ictl_multi;
	/* [0x28] SerDes control */
	uint32_t ictl_multi_rxeq;
	/* [0x2c] SerDes control */
	uint32_t ictl_multi_rxeq_l_low;
	/* [0x30] SerDes control */
	uint32_t ictl_multi_rxeq_l_high;
	/* [0x34] SerDes control */
	uint32_t ictl_multi_rxeyediag;
	/* [0x38] SerDes control */
	uint32_t ictl_multi_txdeemph;
	/* [0x3c] SerDes control */
	uint32_t ictl_multi_txmargin;
	/* [0x40] SerDes control */
	uint32_t ictl_multi_txswing;
	/* [0x44] SerDes control */
	uint32_t idat_multi;
	/* [0x48] SerDes control */
	uint32_t ipd_multi;
	/* [0x4c] SerDes control */
	uint32_t octl_multi_rxbist;
	/* [0x50] SerDes control */
	uint32_t octl_multi;
	/* [0x54] SerDes control */
	uint32_t octl_multi_rxeyediag;
	/* [0x58] SerDes control */
	uint32_t odat_multi_rxbist;
	/* [0x5c] SerDes control */
	uint32_t odat_multi_rxeq;
	/* [0x60] SerDes control */
	uint32_t multi_rx_dvalid;
	/* [0x64] SerDes control */
	uint32_t reserved;
	uint32_t rsrvd[6];
};
/*
 * Complete SerDes unit register file: general registers at offset
 * 0x100, followed by four per-lane register blocks starting at 0x200.
 */
struct al_serdes_regs {
	uint32_t rsrvd_0[64];
	struct serdes_gen gen;                  /* [0x100] */
	struct serdes_lane lane[4];             /* [0x200] */
};
/*
* Registers Fields
*/
/**** version register ****/
/* Revision number (Minor) */
#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0
/* Revision number (Major) */
#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
/* Date of release */
#define SERDES_GEN_VERSION_DATE_DAY_MASK 0x001F0000
#define SERDES_GEN_VERSION_DATE_DAY_SHIFT 16
/* Month of release */
#define SERDES_GEN_VERSION_DATA_MONTH_MASK 0x01E00000
#define SERDES_GEN_VERSION_DATA_MONTH_SHIFT 21
/* Year of release (starting from 2000) */
#define SERDES_GEN_VERSION_DATE_YEAR_MASK 0x3E000000
#define SERDES_GEN_VERSION_DATE_YEAR_SHIFT 25
/* Reserved */
#define SERDES_GEN_VERSION_RESERVED_MASK 0xC0000000
#define SERDES_GEN_VERSION_RESERVED_SHIFT 30
/**** reg_addr register ****/
/* Address value */
#define SERDES_GEN_REG_ADDR_VAL_MASK 0x0000FFFF
#define SERDES_GEN_REG_ADDR_VAL_SHIFT 0
/**** reg_data register ****/
/* Data value */
#define SERDES_GEN_REG_DATA_VAL_MASK 0x000000FF
#define SERDES_GEN_REG_DATA_VAL_SHIFT 0
/**** ICTL_MULTI_BIST register ****/
#define SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_MASK 0x00000007
#define SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_SHIFT 0
/**** ICTL_PCS register ****/
#define SERDES_GEN_ICTL_PCS_EN_NT (1 << 0)
/**** ICTL_PMA register ****/
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_MASK 0x00000007
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT 0
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_REF \
(0 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_R2L \
(3 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_L2R \
(4 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_MASK 0x00000070
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT 4
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_0 \
(0 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_REF \
(2 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_R2L \
(3 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_MASK 0x00000700
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT 8
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_0 \
(0 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_REF \
(2 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_L2R \
(3 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_TXENABLE_A_SRC (1 << 11)
#define SERDES_GEN_ICTL_PMA_TXENABLE_A_SRC_THIS (0 << 11)
#define SERDES_GEN_ICTL_PMA_TXENABLE_A_SRC_MASTER (1 << 11)
#define SERDES_GEN_ICTL_PMA_TXENABLE_A (1 << 12)
#define SERDES_GEN_ICTL_PMA_SYNTHCKBYPASSEN_NT (1 << 13)
/**** IPD_MULTI_SYNTH register ****/
#define SERDES_GEN_IPD_MULTI_SYNTH_B (1 << 0)
/**** IRST register ****/
#define SERDES_GEN_IRST_PIPE_RST_L3_B_A (1 << 0)
#define SERDES_GEN_IRST_PIPE_RST_L2_B_A (1 << 1)
#define SERDES_GEN_IRST_PIPE_RST_L1_B_A (1 << 2)
#define SERDES_GEN_IRST_PIPE_RST_L0_B_A (1 << 3)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A (1 << 4)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A (1 << 5)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A (1 << 6)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A (1 << 7)
#define SERDES_GEN_IRST_MULTI_HARD_SYNTH_B_A (1 << 8)
#define SERDES_GEN_IRST_POR_B_A (1 << 12)
#define SERDES_GEN_IRST_PIPE_RST_L3_B_A_SEL (1 << 16)
#define SERDES_GEN_IRST_PIPE_RST_L2_B_A_SEL (1 << 17)
#define SERDES_GEN_IRST_PIPE_RST_L1_B_A_SEL (1 << 18)
#define SERDES_GEN_IRST_PIPE_RST_L0_B_A_SEL (1 << 19)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A_SEL (1 << 20)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A_SEL (1 << 21)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A_SEL (1 << 22)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A_SEL (1 << 23)
/**** OCTL_MULTI_SYNTHREADY register ****/
#define SERDES_GEN_OCTL_MULTI_SYNTHREADY_A (1 << 0)
/**** OCTL_MULTI_SYNTHSTATUS register ****/
#define SERDES_GEN_OCTL_MULTI_SYNTHSTATUS_A (1 << 0)
/**** clk_out register ****/
#define SERDES_GEN_CLK_OUT_SEL_MASK 0x0000003F
#define SERDES_GEN_CLK_OUT_SEL_SHIFT 0
/**** OCTL_PMA register ****/
#define SERDES_LANE_OCTL_PMA_TXSTATUS_L_A (1 << 0)
/**** ICTL_MULTI_ANDME register ****/
#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A (1 << 0)
#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A_SEL (1 << 1)
/**** ICTL_MULTI_LB register ****/
#define SERDES_LANE_ICTL_MULTI_LB_TX2RXIOTIMEDEN_L_NT (1 << 0)
#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT (1 << 1)
#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT (1 << 2)
#define SERDES_LANE_ICTL_MULTI_LB_PARRX2TXTIMEDEN_L_NT (1 << 3)
#define SERDES_LANE_ICTL_MULTI_LB_CDRCLK2TXEN_L_NT (1 << 4)
#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT_SEL (1 << 8)
#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT_SEL (1 << 9)
/**** ICTL_MULTI_RXBIST register ****/
#define SERDES_LANE_ICTL_MULTI_RXBIST_EN_L_A (1 << 0)
/**** ICTL_MULTI_TXBIST register ****/
#define SERDES_LANE_ICTL_MULTI_TXBIST_EN_L_A (1 << 0)
/**** ICTL_MULTI register ****/
#define SERDES_LANE_ICTL_MULTI_PSTATE_L_MASK 0x00000003
#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SHIFT 0
#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SEL (1 << 2)
#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_MASK 0x00000070
#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_SHIFT 4
#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATAEN_L_A (1 << 8)
#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATA_L_A (1 << 9)
#define SERDES_LANE_ICTL_MULTI_TXBEACON_L_A (1 << 12)
#define SERDES_LANE_ICTL_MULTI_TXDETECTRXREQ_L_A (1 << 13)
#define SERDES_LANE_ICTL_MULTI_RXRATE_L_MASK 0x00070000
#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SHIFT 16
#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SEL (1 << 19)
#define SERDES_LANE_ICTL_MULTI_TXRATE_L_MASK 0x00700000
#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SHIFT 20
#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SEL (1 << 23)
#define SERDES_LANE_ICTL_MULTI_TXAMP_L_MASK 0x07000000
#define SERDES_LANE_ICTL_MULTI_TXAMP_L_SHIFT 24
#define SERDES_LANE_ICTL_MULTI_TXAMP_EN_L (1 << 27)
#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_MASK 0x70000000
#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_SHIFT 28
/**** ICTL_MULTI_RXEQ register ****/
#define SERDES_LANE_ICTL_MULTI_RXEQ_EN_L (1 << 0)
#define SERDES_LANE_ICTL_MULTI_RXEQ_START_L_A (1 << 1)
#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_MASK 0x00000070
#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_SHIFT 4
/**** ICTL_MULTI_RXEQ_L_high register ****/
#define SERDES_LANE_ICTL_MULTI_RXEQ_L_HIGH_VAL (1 << 0)
/**** ICTL_MULTI_RXEYEDIAG register ****/
#define SERDES_LANE_ICTL_MULTI_RXEYEDIAG_START_L_A (1 << 0)
/**** ICTL_MULTI_TXDEEMPH register ****/
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_MASK 0x0003FFFF
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_SHIFT 0
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_MASK 0x7c0
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_SHIFT 6
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_MASK 0xf000
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_SHIFT 12
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_MASK 0x7
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_SHIFT 0
/**** ICTL_MULTI_TXMARGIN register ****/
#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_MASK 0x00000007
#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_SHIFT 0
/**** ICTL_MULTI_TXSWING register ****/
#define SERDES_LANE_ICTL_MULTI_TXSWING_L (1 << 0)
/**** IDAT_MULTI register ****/
#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_MASK 0x0000000F
#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SHIFT 0
#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SEL (1 << 4)
/**** IPD_MULTI register ****/
#define SERDES_LANE_IPD_MULTI_TX_L_B (1 << 0)
#define SERDES_LANE_IPD_MULTI_RX_L_B (1 << 1)
/**** OCTL_MULTI_RXBIST register ****/
#define SERDES_LANE_OCTL_MULTI_RXBIST_DONE_L_A (1 << 0)
#define SERDES_LANE_OCTL_MULTI_RXBIST_RXLOCKED_L_A (1 << 1)
/**** OCTL_MULTI register ****/
#define SERDES_LANE_OCTL_MULTI_RXCDRLOCK2DATA_L_A (1 << 0)
#define SERDES_LANE_OCTL_MULTI_RXEQ_DONE_L_A (1 << 1)
#define SERDES_LANE_OCTL_MULTI_RXREADY_L_A (1 << 2)
#define SERDES_LANE_OCTL_MULTI_RXSTATUS_L_A (1 << 3)
#define SERDES_LANE_OCTL_MULTI_TXREADY_L_A (1 << 4)
#define SERDES_LANE_OCTL_MULTI_TXDETECTRXSTAT_L_A (1 << 5)
#define SERDES_LANE_OCTL_MULTI_TXDETECTRXACK_L_A (1 << 6)
#define SERDES_LANE_OCTL_MULTI_RXSIGNALDETECT_L_A (1 << 7)
/**** OCTL_MULTI_RXEYEDIAG register ****/
#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_MASK 0x00003FFF
#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_SHIFT 0
#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_DONE_L_A (1 << 16)
#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_ERR_L_A (1 << 17)
/**** ODAT_MULTI_RXBIST register ****/
#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_MASK 0x0000FFFF
#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_SHIFT 0
#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_OVERFLOW_L_A (1 << 16)
/**** ODAT_MULTI_RXEQ register ****/
#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_MASK 0x00003FFF
#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_SHIFT 0
/**** MULTI_RX_DVALID register ****/
#define SERDES_LANE_MULTI_RX_DVALID_MASK_CDR_LOCK (1 << 0)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_SIGNALDETECT (1 << 1)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_TX_READY (1 << 2)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_READY (1 << 3)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_SYNT_READY (1 << 4)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_ELECIDLE (1 << 5)
#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_MASK 0x00FF0000
#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_SHIFT 16
#define SERDES_LANE_MULTI_RX_DVALID_PS_00_SEL (1 << 24)
#define SERDES_LANE_MULTI_RX_DVALID_PS_00_VAL (1 << 25)
#define SERDES_LANE_MULTI_RX_DVALID_PS_01_SEL (1 << 26)
#define SERDES_LANE_MULTI_RX_DVALID_PS_01_VAL (1 << 27)
#define SERDES_LANE_MULTI_RX_DVALID_PS_10_SEL (1 << 28)
#define SERDES_LANE_MULTI_RX_DVALID_PS_10_VAL (1 << 29)
#define SERDES_LANE_MULTI_RX_DVALID_PS_11_SEL (1 << 30)
#define SERDES_LANE_MULTI_RX_DVALID_PS_11_VAL (1 << 31)
/**** reserved register ****/
#define SERDES_LANE_RESERVED_OUT_MASK 0x000000FF
#define SERDES_LANE_RESERVED_OUT_SHIFT 0
#define SERDES_LANE_RESERVED_IN_MASK 0x00FF0000
#define SERDES_LANE_RESERVED_IN_SHIFT 16
#ifdef __cplusplus
}
#endif
#endif /* __AL_HAL_serdes_REGS_H__ */
/** @} end of ... group */

View file

@ -0,0 +1,672 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @defgroup group_udma_api API
* @ingroup group_udma
* UDMA API
* @{
* @}
*
* @defgroup group_udma_main UDMA Main
* @ingroup group_udma_api
* UDMA main API
* @{
* @file al_hal_udma.h
*
* @brief C Header file for the Universal DMA HAL driver
*
*/
#ifndef __AL_HAL_UDMA_H__
#define __AL_HAL_UDMA_H__
#include "al_hal_common.h"
#include "al_hal_udma_regs.h"
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
#define DMA_MAX_Q 4
#define AL_UDMA_MIN_Q_SIZE 4
#define AL_UDMA_MAX_Q_SIZE (1 << 16) /* hw can do more, but we limit it */
/* Default Max number of descriptors supported per action */
#define AL_UDMA_DEFAULT_MAX_ACTN_DESCS 16
#define AL_UDMA_REV_ID_0 0
#define AL_UDMA_REV_ID_1 1
#define AL_UDMA_REV_ID_2 2
#define DMA_RING_ID_MASK 0x3
/* New registers ?? */
/* Statistics - TBD */
/**
 * UDMA submission descriptor - one entry of a submission ring.
 * All three views share the first len_ctrl word; which view applies
 * depends on the queue direction and the AL_M2S_DESC_META_DATA flag.
 */
union al_udma_desc {
	/* TX */
	struct {
		uint32_t len_ctrl;	/* length + AL_M2S_DESC_* control bits */
		uint32_t meta_ctrl;
		uint64_t buf_ptr;	/* physical buffer address */
	} tx;
	/* TX Meta, used by upper layer */
	struct {
		uint32_t len_ctrl;
		uint32_t meta_ctrl;
		uint32_t meta1;
		uint32_t meta2;
	} tx_meta;
	/* RX */
	struct {
		uint32_t len_ctrl;	/* length + AL_S2M_DESC_* control bits */
		uint32_t buf2_ptr_lo;
		uint64_t buf1_ptr;	/* physical buffer address */
	} rx;
} __packed_a16;
/* TX (M2S) descriptor length and control fields */
#define AL_M2S_DESC_CONCAT			AL_BIT(31)	/* concatenate */
#define AL_M2S_DESC_DMB				AL_BIT(30)	/* Data Memory Barrier */
#define AL_M2S_DESC_NO_SNOOP_H			AL_BIT(29)
#define AL_M2S_DESC_INT_EN			AL_BIT(28)	/* enable interrupt */
#define AL_M2S_DESC_LAST			AL_BIT(27)
#define AL_M2S_DESC_FIRST			AL_BIT(26)
#define AL_M2S_DESC_RING_ID_SHIFT		24
#define AL_M2S_DESC_RING_ID_MASK		(0x3 << AL_M2S_DESC_RING_ID_SHIFT)
#define AL_M2S_DESC_META_DATA			AL_BIT(23)
#define AL_M2S_DESC_DUMMY			AL_BIT(22)	/* for Metadata only */
#define AL_M2S_DESC_LEN_ADJ_SHIFT		20
#define AL_M2S_DESC_LEN_ADJ_MASK		(0x7 << AL_M2S_DESC_LEN_ADJ_SHIFT)
#define AL_M2S_DESC_LEN_SHIFT			0
#define AL_M2S_DESC_LEN_MASK			(0xfffff << AL_M2S_DESC_LEN_SHIFT)
/* RX (S2M) descriptor length and control fields */
#define AL_S2M_DESC_DUAL_BUF			AL_BIT(31)
#define AL_S2M_DESC_NO_SNOOP_H			AL_BIT(29)
#define AL_S2M_DESC_INT_EN			AL_BIT(28)	/* enable interrupt */
#define AL_S2M_DESC_RING_ID_SHIFT		24
#define AL_S2M_DESC_RING_ID_MASK		(0x3 << AL_S2M_DESC_RING_ID_SHIFT)
#define AL_S2M_DESC_LEN_SHIFT			0
#define AL_S2M_DESC_LEN_MASK			(0xffff << AL_S2M_DESC_LEN_SHIFT)
#define AL_S2M_DESC_LEN2_SHIFT			16
#define AL_S2M_DESC_LEN2_MASK			(0x3fff << AL_S2M_DESC_LEN2_SHIFT)
#define AL_S2M_DESC_LEN2_GRANULARITY_SHIFT	6
/* TX/RX descriptor VMID field (in the buffer address 64 bit field) */
#define AL_UDMA_DESC_VMID_SHIFT			48
/** UDMA completion descriptor - one entry of a completion ring */
union al_udma_cdesc {
	/* TX completion */
	struct {
		uint32_t ctrl_meta;	/* AL_UDMA_CDESC_* status flags */
	} al_desc_comp_tx;
	/* RX completion */
	struct {
		/* TBD */
		uint32_t ctrl_meta;	/* AL_UDMA_CDESC_* status flags */
	} al_desc_comp_rx;
} __packed_a4;
/* TX/RX common completion descriptor ctrl_meta fields */
#define AL_UDMA_CDESC_ERROR AL_BIT(31)
#define AL_UDMA_CDESC_BUF1_USED AL_BIT(30)
#define AL_UDMA_CDESC_DDP AL_BIT(29)
#define AL_UDMA_CDESC_LAST AL_BIT(27)
#define AL_UDMA_CDESC_FIRST AL_BIT(26)
/* word 2 */
#define AL_UDMA_CDESC_BUF2_USED AL_BIT(31)
#define AL_UDMA_CDESC_BUF2_LEN_SHIFT 16
#define AL_UDMA_CDESC_BUF2_LEN_MASK AL_FIELD_MASK(29, 16)
/** Basic Buffer structure - a single contiguous DMA-able region */
struct al_buf {
	al_phys_addr_t addr; /**< Buffer physical address */
	uint32_t len; /**< Buffer length in bytes */
};
/** Block is a set of buffers that belong to same source or destination */
struct al_block {
	struct al_buf *bufs; /**< The buffers of the block */
	uint32_t num; /**< Number of buffers of the block */
	/*
	 * VMID to be assigned to the block descriptors.
	 * Requires VMID in descriptor to be enabled for the specific UDMA
	 * queue.
	 */
	uint16_t vmid;
};
/** UDMA type (direction of a UDMA engine instance) */
enum al_udma_type {
	UDMA_TX,	/* transmit: uses M2S descriptor layout */
	UDMA_RX		/* receive: uses S2M descriptor layout */
};
/** UDMA state */
enum al_udma_state {
	UDMA_DISABLE = 0,
	UDMA_IDLE,
	UDMA_NORMAL,
	UDMA_ABORT,
	UDMA_RESET
};
/* Printable names for the states above - presumably indexed by enum
 * value; verify against the definition in the .c file. */
extern const char *const al_udma_states_name[];
/** UDMA Q specific parameters from upper layer */
struct al_udma_q_params {
	uint32_t size;		/**< ring size (in descriptors), submission and
				 * completion rings must have same size
				 */
	union al_udma_desc *desc_base;	/**< cpu address for submission ring
					 * descriptors
					 */
	al_phys_addr_t desc_phy_base;	/**< submission ring descriptors
					 * physical base address
					 */
#ifdef __FreeBSD__
	bus_dma_tag_t desc_phy_base_tag;
	bus_dmamap_t desc_phy_base_map;
#endif
	uint8_t *cdesc_base;	/**< completion descriptors pointer, NULL
				 * means no completion update
				 */
	al_phys_addr_t cdesc_phy_base;	/**< completion descriptors ring
					 * physical base address
					 */
#ifdef __FreeBSD__
	bus_dma_tag_t cdesc_phy_base_tag;
	bus_dmamap_t cdesc_phy_base_map;
#endif
	uint32_t cdesc_size;	/**< size (in bytes) of a single dma completion
				 * descriptor
				 */
	uint8_t adapter_rev_id;	/**< PCI adapter revision ID */
};
/** UDMA parameters from upper layer */
struct al_udma_params {
	struct unit_regs __iomem *udma_regs_base;
	enum al_udma_type type;	/**< Tx or Rx */
	uint8_t num_of_queues;	/**< number of queues supported by the UDMA */
	const char *name;	/**< the upper layer must keep the string area */
};
/* Forward declaration */
struct al_udma;
/** SW status of a queue (driver-side state, not a hardware value) */
enum al_udma_queue_status {
	AL_QUEUE_NOT_INITIALIZED = 0,
	AL_QUEUE_DISABLED,
	AL_QUEUE_ENABLED,
	AL_QUEUE_ABORTED
};
/** UDMA Queue private data structure */
struct __cache_aligned al_udma_q {
	uint16_t size_mask;		/**< mask used for pointers wrap around
					 * equals to size - 1
					 */
	union udma_q_regs __iomem *q_regs; /**< pointer to the per queue UDMA
					    * registers
					    */
	union al_udma_desc *desc_base_ptr; /**< base address submission ring
					    * descriptors
					    */
	uint16_t next_desc_idx;		/**< index to the next available
					 * submission descriptor
					 */
	uint32_t desc_ring_id;		/**< current submission ring id */
	uint8_t *cdesc_base_ptr;	/**< completion descriptors pointer,
					 * NULL means no completion
					 */
	uint32_t cdesc_size;		/**< size (in bytes) of the udma
					 * completion ring descriptor
					 */
	uint16_t next_cdesc_idx;	/**< index in descriptors for next
					 * completing ring descriptor
					 */
	uint8_t *end_cdesc_ptr;		/**< used for wrap around detection */
	uint16_t comp_head_idx;		/**< completion ring head pointer
					 * register shadow
					 */
	volatile union al_udma_cdesc *comp_head_ptr; /**< when working in
					 * get_packet mode we maintain pointer
					 * instead of the above idx
					 */
	uint32_t pkt_crnt_descs;	/**< holds the number of processed
					 * descriptors of the current packet
					 */
	uint32_t comp_ring_id;		/**< current completion Ring Id */
	al_phys_addr_t desc_phy_base;	/**< submission desc. physical base */
	al_phys_addr_t cdesc_phy_base;	/**< completion desc. physical base */
	uint32_t flags;			/**< flags used for completion modes */
	uint32_t size;			/**< ring size in descriptors */
	enum al_udma_queue_status status;
	struct al_udma *udma;		/**< pointer to parent UDMA */
	uint32_t qid;			/**< the index number of the queue */
	/*
	 * The following fields are duplicated from the UDMA parent adapter
	 * due to performance considerations.
	 */
	uint8_t adapter_rev_id;		/**< PCI adapter revision ID */
};
/* UDMA engine handle - one instance per direction (Tx or Rx) */
struct al_udma {
	const char *name;	/* owned by the upper layer */
	enum al_udma_type type;	/* Tx or Rx */
	enum al_udma_state state;
	uint8_t num_of_queues;	/* number of queues supported by the UDMA */
	union udma_regs __iomem *udma_regs; /* pointer to the UDMA registers */
	struct udma_gen_regs *gen_regs;	/* pointer to the Gen registers */
	struct al_udma_q udma_q[DMA_MAX_Q]; /* embedded array of UDMA queues
					     * (structs, not pointers) */
	unsigned int rev_id;	/* UDMA revision ID */
};
/*
* Configurations
*/
/* Initializations functions */
/**
* Initialize the udma engine
*
* @param udma udma data structure
* @param udma_params udma parameters from upper layer
*
* @return 0 on success. -EINVAL otherwise.
*/
int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params);
/**
* Initialize the udma queue data structure
*
* @param udma
* @param qid
* @param q_params
*
* @return 0 if no error found.
* -EINVAL if the qid is out of range
* -EIO if queue was already initialized
*/
int al_udma_q_init(struct al_udma *udma, uint32_t qid,
struct al_udma_q_params *q_params);
/**
* Reset a udma queue
*
* Prior to calling this function make sure:
* 1. Queue interrupts are masked
* 2. No additional descriptors are written to the descriptor ring of the queue
* 3. No completed descriptors are being fetched
*
* The queue can be initialized again using 'al_udma_q_init'
*
* @param udma_q
*
* @return 0 if no error found.
*/
int al_udma_q_reset(struct al_udma_q *udma_q);
/**
* return (by reference) a pointer to a specific queue date structure.
* this pointer needed for calling functions (i.e. al_udma_desc_action_add) that
* require this pointer as input argument.
*
* @param udma udma data structure
* @param qid queue index
* @param q_handle pointer to the location where the queue structure pointer
* written to.
*
* @return 0 on success. -EINVAL otherwise.
*/
int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
struct al_udma_q **q_handle);
/**
* Change the UDMA's state
*
* @param udma udma data structure
* @param state the target state
*
* @return 0
*/
int al_udma_state_set(struct al_udma *udma, enum al_udma_state state);
/**
* return the current UDMA hardware state
*
* @param udma udma handle
*
* @return the UDMA state as reported by the hardware.
*/
enum al_udma_state al_udma_state_get(struct al_udma *udma);
/*
* Action handling
*/
/**
 * Get the number of descriptors that can currently be submitted to the
 * queue. One slot is always kept free so a full ring can be told apart
 * from an empty one.
 *
 * @param udma_q queue handle
 *
 * @return number of free submission descriptors
 */
static INLINE uint32_t al_udma_available_get(struct al_udma_q *udma_q)
{
	uint16_t avail;

	/* distance from the next submission slot to the completion head,
	 * minus the one reserved slot, folded into the ring */
	avail = udma_q->next_cdesc_idx - (udma_q->next_desc_idx + 1);
	avail &= udma_q->size_mask;

	return (uint32_t)avail;
}
/**
 * Check whether the queue has no outstanding descriptors.
 *
 * @param udma_q queue handle
 *
 * @return AL_TRUE when every submitted descriptor has been completed
 *	(submission and completion indices coincide), AL_FALSE when
 *	descriptors are still pending completion.
 */
static INLINE al_bool al_udma_is_empty(struct al_udma_q *udma_q)
{
	uint16_t pending;

	pending = (udma_q->next_cdesc_idx - udma_q->next_desc_idx) &
		udma_q->size_mask;

	return (pending == 0) ? AL_TRUE : AL_FALSE;
}
/**
 * Get the next available submission descriptor and advance the
 * queue's submission index (wrapping at the end of the ring).
 *
 * @param udma_q queue handle
 *
 * @return pointer to the next available descriptor
 */
static INLINE union al_udma_desc *al_udma_desc_get(struct al_udma_q *udma_q)
{
	uint16_t idx;
	union al_udma_desc *ret;

	al_assert(udma_q);

	idx = udma_q->next_desc_idx;
	ret = &udma_q->desc_base_ptr[idx];

	/* advance, wrapping back to slot 0 past the last descriptor */
	udma_q->next_desc_idx = (idx + 1) & udma_q->size_mask;

	return ret;
}
/**
* get ring id for the last allocated descriptor
* @param udma_q
*
* @return ring id for the last allocated descriptor
* this function must be called each time a new descriptor is allocated
* by the al_udma_desc_get(), unless ring id is ignored.
*/
static INLINE uint32_t al_udma_ring_id_get(struct al_udma_q *udma_q)
{
	uint32_t ring_id;

	al_assert(udma_q);

	ring_id = udma_q->desc_ring_id;

	/*
	 * Calculate the ring id of the next descriptor: if next_desc points
	 * to the first descriptor, the queue just wrapped around, so the ring
	 * id advances (modulo DMA_RING_ID_MASK + 1).
	 *
	 * Fix: the comparison must be inside unlikely() —
	 * 'unlikely(x) == 0' applies the branch-prediction hint to the wrong
	 * (and logically inverted) expression.
	 */
	if (unlikely(udma_q->next_desc_idx == 0))
		udma_q->desc_ring_id = (udma_q->desc_ring_id + 1) &
			DMA_RING_ID_MASK;

	return ring_id;
}
/* add DMA action - trigger the engine */
/**
* add num descriptors to the submission queue.
*
* @param udma_q queue handle
* @param num number of descriptors to add to the queues ring.
*
* @return 0;
*/
static INLINE int al_udma_desc_action_add(struct al_udma_q *udma_q,
					  uint32_t num)
{
	uint32_t *drtp_inc_reg;

	al_assert(udma_q);
	al_assert((num > 0) && (num <= udma_q->size));

	drtp_inc_reg = &udma_q->q_regs->rings.drtp_inc;

	/*
	 * Make sure the data written to the descriptors is visible to the
	 * DMA engine before the doorbell write below.
	 */
	al_local_data_memory_barrier();

	/*
	 * The explicit synchronization above already provides the required
	 * ordering, so the relaxed register write variant is sufficient.
	 */
	al_reg_write32_relaxed(drtp_inc_reg, num);

	return 0;
}
/** Test completion-descriptor flags for the first-in-packet bit */
#define cdesc_is_first(flags) ((flags) & AL_UDMA_CDESC_FIRST)
/** Test completion-descriptor flags for the last-in-packet bit */
#define cdesc_is_last(flags) ((flags) & AL_UDMA_CDESC_LAST)
/**
* return pointer to the cdesc + offset desciptors. wrap around when needed.
*
* @param udma_q queue handle
* @param cdesc pointer that set by this function
* @param offset offset desciptors
*
*/
static INLINE volatile union al_udma_cdesc *al_cdesc_next(
	struct al_udma_q *udma_q,
	volatile union al_udma_cdesc *cdesc,
	uint32_t offset)
{
	volatile uint8_t *tmp;

	/*
	 * Fix: validate the arguments BEFORE using them — the original
	 * computed pointer arithmetic on 'cdesc' in the initializer, ahead of
	 * the asserts (arithmetic on a NULL pointer is undefined behavior).
	 */
	al_assert(udma_q);
	al_assert(cdesc);

	tmp = (volatile uint8_t *) cdesc + offset * udma_q->cdesc_size;

	/* if wrap around */
	if (unlikely((tmp > udma_q->end_cdesc_ptr)))
		return (union al_udma_cdesc *)
			(udma_q->cdesc_base_ptr +
			(tmp - udma_q->end_cdesc_ptr - udma_q->cdesc_size));

	return (volatile union al_udma_cdesc *) tmp;
}
/**
* check if the flags of the descriptor indicates that is new one
* the function uses the ring id from the descriptor flags to know whether it
* new one by comparing it with the curring ring id of the queue
*
* @param udma_q queue handle
* @param flags the flags of the completion descriptor
*
* @return AL_TRUE if the completion descriptor is new one.
* AL_FALSE if it old one.
*/
static INLINE al_bool al_udma_new_cdesc(struct al_udma_q *udma_q,
					uint32_t flags)
{
	uint32_t desc_ring_id;

	/* extract the ring id carried in the completion descriptor flags */
	desc_ring_id = (flags & AL_M2S_DESC_RING_ID_MASK) >>
	    AL_M2S_DESC_RING_ID_SHIFT;

	/* a descriptor is new when its ring id matches the queue's current one */
	return (desc_ring_id == udma_q->comp_ring_id) ? AL_TRUE : AL_FALSE;
}
/**
* get next completion descriptor
* this function will also increment the completion ring id when the ring wraps
* around
*
* @param udma_q queue handle
* @param cdesc current completion descriptor
*
* @return pointer to the completion descriptor that follows the one pointed by
* cdesc
*/
static INLINE volatile union al_udma_cdesc *al_cdesc_next_update(
	struct al_udma_q *udma_q,
	volatile union al_udma_cdesc *cdesc)
{
	volatile uint8_t *byte_ptr = (volatile uint8_t *) cdesc;

	/* crossing the last descriptor wraps the ring and bumps the ring id */
	if (unlikely(byte_ptr == udma_q->end_cdesc_ptr)) {
		udma_q->comp_ring_id =
			(udma_q->comp_ring_id + 1) & DMA_RING_ID_MASK;
		return (union al_udma_cdesc *) udma_q->cdesc_base_ptr;
	}

	return (volatile union al_udma_cdesc *)
		(byte_ptr + udma_q->cdesc_size);
}
/**
* get next completed packet from completion ring of the queue
*
* @param udma_q udma queue handle
* @param desc pointer that set by this function to the first descriptor
* note: desc is valid only when return value is not zero
* @return number of descriptors that belong to the packet. 0 means no completed
* full packet was found.
* If the descriptors found in the completion queue don't form full packet (no
* desc with LAST flag), then this function will do the following:
* (1) save the number of processed descriptors.
* (2) save last processed descriptor, so next time it called, it will resume
* from there.
* (3) return 0.
* note: the descriptors that belong to the completed packet will still be
* considered as used, that means the upper layer is safe to access those
* descriptors when this function returns. the al_udma_cdesc_ack() should be
* called to inform the udma driver that those descriptors are freed.
*/
uint32_t al_udma_cdesc_packet_get(
struct al_udma_q *udma_q,
volatile union al_udma_cdesc **desc);
/** get completion descriptor pointer from its index */
#define al_udma_cdesc_idx_to_ptr(udma_q, idx) \
((volatile union al_udma_cdesc *) ((udma_q)->cdesc_base_ptr + \
(idx) * (udma_q)->cdesc_size))
/**
* return number of all completed descriptors in the completion ring
*
* @param udma_q udma queue handle
* @param cdesc pointer that set by this function to the first descriptor
* note: desc is valid only when return value is not zero
* note: pass NULL if not interested
* @return number of descriptors. 0 means no completed descriptors were found.
* note: the descriptors that belong to the completed packet will still be
* considered as used, that means the upper layer is safe to access those
* descriptors when this function returns. the al_udma_cdesc_ack() should be
* called to inform the udma driver that those descriptors are freed.
*/
static INLINE uint32_t al_udma_cdesc_get_all(
	struct al_udma_q *udma_q,
	volatile union al_udma_cdesc **cdesc)
{
	uint16_t ndescs;

	al_assert(udma_q);

	/* latch the hardware completion head pointer (low 16 bits of crhp) */
	udma_q->comp_head_idx = (uint16_t)
		(al_reg_read32(&udma_q->q_regs->rings.crhp) & 0xFFFF);

	/* completed descriptors = distance from ring tail to latched head */
	ndescs = (udma_q->comp_head_idx - udma_q->next_cdesc_idx) &
		udma_q->size_mask;

	if (cdesc)
		*cdesc = al_udma_cdesc_idx_to_ptr(udma_q,
			udma_q->next_cdesc_idx);

	return (uint32_t) ndescs;
}
/**
* acknowledge the driver that the upper layer completed processing completion
* descriptors
*
* @param udma_q udma queue handle
* @param num number of descriptors to acknowledge
*
* @return 0
*/
static INLINE int al_udma_cdesc_ack(struct al_udma_q *udma_q, uint32_t num)
{
	al_assert(udma_q);

	/* advance the completion ring tail; the mask keeps it in range */
	udma_q->next_cdesc_idx = (udma_q->next_cdesc_idx + num) &
		udma_q->size_mask;

	return 0;
}
/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
#endif /* __AL_HAL_UDMA_H__ */
/** @} end of UDMA group */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,755 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @defgroup group_udma_config UDMA Config
* @ingroup group_udma_api
* UDMA Config API
* @{
* @file al_hal_udma_config.h
*
* @brief C Header file for the Universal DMA HAL driver for configuration APIs
*
*/
#ifndef __AL_HAL_UDMA_CONFIG_H__
#define __AL_HAL_UDMA_CONFIG_H__
#include <al_hal_udma.h>
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
/** Scheduling mode */
enum al_udma_sch_mode {
	STRICT,			/* Strict priority */
	SRR,			/* Simple Round Robin */
	DWRR			/* Deficit Weighted Round Robin */
};
/** AXI configuration */
struct al_udma_axi_conf {
uint32_t axi_timeout; /* Timeout for AXI transactions */
uint8_t arb_promotion; /* arbitration promotion */
al_bool swap_8_bytes; /* enable 8 bytes swap instead of 4 bytes */
al_bool swap_s2m_data;
al_bool swap_s2m_desc;
al_bool swap_m2s_data;
al_bool swap_m2s_desc;
};
/** UDMA AXI M2S configuration */
struct al_udma_axi_submaster {
uint8_t id; /* AXI ID */
uint8_t cache_type;
uint8_t burst;
uint16_t used_ext;
uint8_t bus_size;
uint8_t qos;
uint8_t prot;
uint8_t max_beats;
};
/** UDMA AXI M2S configuration */
struct al_udma_m2s_axi_conf {
struct al_udma_axi_submaster comp_write;
struct al_udma_axi_submaster data_read;
struct al_udma_axi_submaster desc_read;
al_bool break_on_max_boundary; /* Data read break on max boundary */
uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
uint8_t ostand_max_data_read;
uint8_t ostand_max_desc_read;
uint8_t ostand_max_comp_req;
uint8_t ostand_max_comp_write;
};
/** UDMA AXI S2M configuration */
struct al_udma_s2m_axi_conf {
struct al_udma_axi_submaster data_write;
struct al_udma_axi_submaster desc_read;
struct al_udma_axi_submaster comp_write;
al_bool break_on_max_boundary; /* Data read break on max boundary */
uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
uint8_t ostand_max_data_req;
uint8_t ostand_max_data_write;
uint8_t ostand_max_comp_req;
uint8_t ostand_max_comp_write;
uint8_t ostand_max_desc_read;
uint8_t ack_fifo_depth; /* size of the stream application ack fifo */
};
/** M2S error logging */
struct al_udma_err_log {
uint32_t error_status;
uint32_t header[4];
};
/** M2S max packet size configuration */
struct al_udma_m2s_pkt_len_conf {
uint32_t max_pkt_size;
al_bool encode_64k_as_zero;
};
/** M2S Descriptor Prefetch configuration */
struct al_udma_m2s_desc_pref_conf {
uint8_t desc_fifo_depth;
enum al_udma_sch_mode sch_mode; /* Scheduling mode
* (either strict or RR) */
uint8_t max_desc_per_packet; /* max number of descriptors to
* prefetch */
/* in one burst (5b) */
uint8_t pref_thr;
uint8_t min_burst_above_thr; /* min burst size when fifo above
* pref_thr (4b)
*/
uint8_t min_burst_below_thr; /* min burst size when fifo below
* pref_thr (4b)
*/
uint8_t max_pkt_limit; /* maximum number of packets in the data
* read FIFO, defined based on header
* FIFO size
*/
uint16_t data_fifo_depth; /* maximum number of data beats in the
* data read FIFO,
* defined based on header FIFO size
*/
};
/** S2M Descriptor Prefetch configuration */
struct al_udma_s2m_desc_pref_conf {
uint8_t desc_fifo_depth;
enum al_udma_sch_mode sch_mode; /* Scheduling mode *
* (either strict or RR)
*/
al_bool q_promotion; /* enable promotion */
al_bool force_promotion; /* force promotion */
al_bool en_pref_prediction; /* enable prefetch prediction */
uint8_t promotion_th; /* Threshold for queue promotion */
uint8_t pref_thr;
uint8_t min_burst_above_thr; /* min burst size when fifo above
* pref_thr (4b)
*/
uint8_t min_burst_below_thr; /* min burst size when fifo below
* pref_thr (4b)
*/
uint8_t a_full_thr; /* almost full threshold */
};
/** S2M Data write configuration */
struct al_udma_s2m_data_write_conf {
uint16_t data_fifo_depth; /* maximum number of data beats in the
* data write FIFO, defined based on
* header FIFO size
*/
uint8_t max_pkt_limit; /* maximum number of packets in the
* data write FIFO,defined based on
* header FIFO size
*/
uint8_t fifo_margin;
uint32_t desc_wait_timer; /* waiting time for the host to write
* new descriptor to the queue
* (for the current packet in process)
*/
uint32_t flags; /* bitwise of flags of s2m
* data_cfg_2 register
*/
};
/** S2M Completion configuration */
struct al_udma_s2m_completion_conf {
	uint8_t desc_size;	/* Size of completion descriptor
				 * in words
				 */
	al_bool cnt_words;	/* Completion fifo in use counter:
				 * AL_TRUE words, AL_FALSE descriptors
				 */
	al_bool q_promotion;	/* Enable promotion of the current
				 * unack in progress */
				/* in the completion write scheduler */
	al_bool force_rr;	/* force RR arbitration in the
				 * scheduler
				 */
//	uint8_t ack_fifo_depth;	/* size of the stream application ack fifo */
	uint8_t q_free_min;	/* minimum number of free completion
				 * entries
				 */
				/* to qualify for promotion */
	uint16_t comp_fifo_depth; /* Size of completion fifo in words */
	uint16_t unack_fifo_depth; /* Size of unacked fifo in descs */
	uint32_t timeout;	/* Ack timeout from stream interface */
};
/** M2S UDMA DWRR configuration */
struct al_udma_m2s_dwrr_conf {
al_bool enable_dwrr;
uint8_t inc_factor;
uint8_t weight;
al_bool pkt_mode;
uint32_t deficit_init_val;
};
/** M2S DMA Rate Limitation mode */
struct al_udma_m2s_rlimit_mode {
al_bool pkt_mode_en;
uint16_t short_cycle_sz;
uint32_t token_init_val;
};
/** M2S Stream/Q Rate Limitation */
struct al_udma_m2s_rlimit_cfg {
uint32_t max_burst_sz; /* maximum number of accumulated bytes in the
* token counter
*/
uint16_t long_cycle_sz; /* number of short cycles between token fill */
uint32_t long_cycle; /* number of bits to add in each long cycle */
uint32_t short_cycle; /* number of bits to add in each cycle */
uint32_t mask; /* mask the different types of rate limiters */
};
enum al_udma_m2s_rlimit_action {
AL_UDMA_STRM_RLIMIT_ENABLE,
AL_UDMA_STRM_RLIMIT_PAUSE,
AL_UDMA_STRM_RLIMIT_RESET
};
/** M2S UDMA Q scheduling configuration */
struct al_udma_m2s_q_dwrr_conf {
uint32_t max_deficit_cnt_sz; /*maximum number of accumulated bytes
* in the deficit counter
*/
al_bool strict; /* bypass DWRR */
uint8_t axi_qos;
uint16_t q_qos;
uint8_t weight;
};
/** M2S UDMA / UDMA Q scheduling configuration */
struct al_udma_m2s_sc {
enum al_udma_sch_mode sch_mode; /* Scheduling Mode */
struct al_udma_m2s_dwrr_conf dwrr; /* DWRR configuration */
};
/** UDMA / UDMA Q rate limitation configuration */
struct al_udma_m2s_rlimit {
struct al_udma_m2s_rlimit_mode rlimit_mode;
/* rate limitation enablers */
#if 0
struct al_udma_tkn_bkt_conf token_bkt; /* Token Bucket configuration */
#endif
};
/** UDMA Data read configuration */
struct al_udma_m2s_data_rd_conf {
uint8_t max_rd_d_beats; /* max burst size for reading data
* (in AXI beats-128b) (5b)
*/
uint8_t max_rd_d_out_req; /* max number of outstanding data
* read requests (6b)
*/
uint16_t max_rd_d_out_beats; /* max num. of data read beats (10b) */
};
/** M2S UDMA completion and application timeouts */
struct al_udma_m2s_comp_timeouts {
enum al_udma_sch_mode sch_mode; /* Scheduling mode
* (either strict or RR)
*/
al_bool enable_q_promotion;
uint8_t unack_fifo_depth; /* unacked desc fifo size */
uint8_t comp_fifo_depth; /* desc fifo size */
uint32_t coal_timeout; /* (24b) */
uint32_t app_timeout; /* (24b) */
};
/** S2M UDMA per queue completion configuration */
struct al_udma_s2m_q_comp_conf {
al_bool dis_comp_coal; /* disable completion coalescing */
al_bool en_comp_ring_update; /* enable writing completion descs */
uint32_t comp_timer; /* completion coalescing timer */
al_bool en_hdr_split; /* enable header split */
al_bool force_hdr_split; /* force header split */
uint16_t hdr_split_size; /* size used for the header split */
uint8_t q_qos; /* queue QoS */
};
/** UDMA per queue VMID control configuration */
struct al_udma_gen_vmid_q_conf {
/* Enable usage of the VMID per queue according to 'vmid' */
al_bool queue_en;
/* Enable usage of the VMID from the descriptor buffer address 63:48 */
al_bool desc_en;
/* VMID to be applied when 'queue_en' is asserted */
uint16_t vmid;
/* VMADDR to be applied to msbs when 'desc_en' is asserted.
* Relevant for revisions >= AL_UDMA_REV_ID_REV2 */
uint16_t vmaddr;
};
/** UDMA VMID control configuration */
struct al_udma_gen_vmid_conf {
/* TX queue configuration */
struct al_udma_gen_vmid_q_conf tx_q_conf[DMA_MAX_Q];
/* RX queue configuration */
struct al_udma_gen_vmid_q_conf rx_q_conf[DMA_MAX_Q];
};
/** UDMA VMID MSIX control configuration */
struct al_udma_gen_vmid_msix_conf {
/* Enable write to all VMID_n registers in the MSI-X Controller */
al_bool access_en;
/* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
al_bool sel;
};
/** UDMA per Tx queue advanced VMID control configuration */
struct al_udma_gen_vmid_advanced_tx_q_conf {
/**********************************************************************
* Tx Data VMID
**********************************************************************/
/* Tx data VMID enable */
al_bool tx_q_data_vmid_en;
/*
* For Tx data reads, replacement bits for the original address.
* The number of bits replaced is determined according to
* 'tx_q_addr_hi_sel'
*/
unsigned int tx_q_addr_hi;
/*
* For Tx data reads, 6 bits serving the number of bits taken from the
* extra register on account of bits coming from the original address
* field.
* When 'tx_q_addr_hi_sel'=32 all of 'tx_q_addr_hi' will be taken.
* When 'tx_q_addr_hi_sel'=0 none of it will be taken, and when any
* value in between, it will start from the MSB bit and sweep down as
* many bits as needed. For example if 'tx_q_addr_hi_sel'=8, the final
* address [63:56] will carry 'tx_q_addr_hi'[31:24] while [55:32] will
* carry the original buffer address[55:32].
*/
unsigned int tx_q_addr_hi_sel;
/*
* Tx data read VMID
* Masked per bit with 'tx_q_data_vmid_mask'
*/
unsigned int tx_q_data_vmid;
/*
* Tx data read VMID mask
* Each '1' selects from the buffer address, each '0' selects from
* 'tx_q_data_vmid'
*/
unsigned int tx_q_data_vmid_mask;
/**********************************************************************
* Tx prefetch VMID
**********************************************************************/
/* Tx prefetch VMID enable */
al_bool tx_q_prefetch_vmid_en;
/* Tx prefetch VMID */
unsigned int tx_q_prefetch_vmid;
/**********************************************************************
* Tx completion VMID
**********************************************************************/
/* Tx completion VMID enable */
al_bool tx_q_compl_vmid_en;
/* Tx completion VMID */
unsigned int tx_q_compl_vmid;
};
/** UDMA per Rx queue advanced VMID control configuration */
struct al_udma_gen_vmid_advanced_rx_q_conf {
/**********************************************************************
* Rx Data VMID
**********************************************************************/
/* Rx data VMID enable */
al_bool rx_q_data_vmid_en;
/*
* For Rx data writes, replacement bits for the original address.
* The number of bits replaced is determined according to
* 'rx_q_addr_hi_sel'
*/
unsigned int rx_q_addr_hi;
/*
* For Rx data writes, 6 bits serving the number of bits taken from the
* extra register on account of bits coming from the original address
* field.
*/
unsigned int rx_q_addr_hi_sel;
/*
* Rx data write VMID
* Masked per bit with 'rx_q_data_vmid_mask'
*/
unsigned int rx_q_data_vmid;
/* Rx data write VMID mask */
unsigned int rx_q_data_vmid_mask;
/**********************************************************************
* Rx Data Buffer 2 VMID
**********************************************************************/
/* Rx data buff2 VMID enable */
al_bool rx_q_data_buff2_vmid_en;
/*
* For Rx data buff2 writes, replacement bits for the original address.
* The number of bits replaced is determined according to
* 'rx_q_data_buff2_addr_hi_sel'
*/
unsigned int rx_q_data_buff2_addr_hi;
/*
* For Rx data buff2 writes, 6 bits serving the number of bits taken
* from the extra register on account of bits coming from the original
* address field.
*/
unsigned int rx_q_data_buff2_addr_hi_sel;
/*
* Rx data buff2 write VMID
* Masked per bit with 'rx_q_data_buff2_mask'
*/
unsigned int rx_q_data_buff2_vmid;
/* Rx data buff2 write VMID mask */
unsigned int rx_q_data_buff2_mask;
/**********************************************************************
* Rx DDP VMID
**********************************************************************/
/* Rx DDP write VMID enable */
al_bool rx_q_ddp_vmid_en;
/*
* For Rx DDP writes, replacement bits for the original address.
* The number of bits replaced is determined according to
* 'rx_q_ddp_addr_hi_sel'
*/
unsigned int rx_q_ddp_addr_hi;
/*
* For Rx DDP writes, 6 bits serving the number of bits taken from the
* extra register on account of bits coming from the original address
* field.
*/
unsigned int rx_q_ddp_addr_hi_sel;
/*
* Rx DDP write VMID
* Masked per bit with 'rx_q_ddp_mask'
*/
unsigned int rx_q_ddp_vmid;
/* Rx DDP write VMID mask */
unsigned int rx_q_ddp_mask;
/**********************************************************************
* Rx prefetch VMID
**********************************************************************/
/* Rx prefetch VMID enable */
al_bool rx_q_prefetch_vmid_en;
/* Rx prefetch VMID */
unsigned int rx_q_prefetch_vmid;
/**********************************************************************
* Rx completion VMID
**********************************************************************/
/* Rx completion VMID enable */
al_bool rx_q_compl_vmid_en;
/* Rx completion VMID */
unsigned int rx_q_compl_vmid;
};
/**
* Header split, buffer 2 per queue configuration
* When header split is enabled, Buffer_2 is used as an address for the header
* data. Buffer_2 is defined as 32-bits in the RX descriptor and it is defined
* that the MSB ([63:32]) of Buffer_1 is used as address [63:32] for the header
* address.
*/
struct al_udma_gen_hdr_split_buff2_q_conf {
/*
* MSB of the 64-bit address (bits [63:32]) that can be used for header
* split for this queue
*/
unsigned int addr_msb;
/*
* Determine how to select the MSB (bits [63:32]) of the address when
* header split is enabled (4 bits, one per byte)
* - Bits [3:0]:
* [0] selector for bits [39:32]
* [1] selector for bits [47:40]
* [2] selector for bits [55:48]
* [3] selector for bits [63:55]
* - Bit value:
* 0 Use Buffer_1 (legacy operation)
* 1 Use the queue configuration 'addr_msb'
*/
unsigned int add_msb_sel;
};
/* Report Error - to be used for abort */
void al_udma_err_report(struct al_udma *udma);
/* Statistics - TBD */
void al_udma_stats_get(struct al_udma *udma);
/* Misc configurations */
/* Configure AXI configuration */
int al_udma_axi_set(struct udma_gen_axi *axi_regs,
struct al_udma_axi_conf *axi);
/* Configure UDMA AXI M2S configuration */
int al_udma_m2s_axi_set(struct al_udma *udma,
struct al_udma_m2s_axi_conf *axi_m2s);
/* Configure UDMA AXI S2M configuration */
int al_udma_s2m_axi_set(struct al_udma *udma,
struct al_udma_s2m_axi_conf *axi_s2m);
/* Configure M2S packet len */
int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma,
struct al_udma_m2s_pkt_len_conf *conf);
/* Configure M2S UDMA descriptor prefetch */
int al_udma_m2s_pref_set(struct al_udma *udma,
struct al_udma_m2s_desc_pref_conf *conf);
int al_udma_m2s_pref_get(struct al_udma *udma,
struct al_udma_m2s_desc_pref_conf *conf);
/* set m2s packet's max descriptors (including meta descriptors) */
#define AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET 31
int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs);
/* set s2m packets' max descriptors */
#define AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET 31
int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs);
/* Configure S2M UDMA descriptor prefetch */
int al_udma_s2m_pref_set(struct al_udma *udma,
			 struct al_udma_s2m_desc_pref_conf *conf);
/* NOTE(review): duplicate declaration — al_udma_m2s_pref_get is already
 * declared above; presumably this was meant to be al_udma_s2m_pref_get.
 * Left unchanged to avoid altering the public interface — TODO confirm. */
int al_udma_m2s_pref_get(struct al_udma *udma,
			 struct al_udma_m2s_desc_pref_conf *conf);
/* Configure S2M UDMA data write */
int al_udma_s2m_data_write_set(struct al_udma *udma,
struct al_udma_s2m_data_write_conf *conf);
/* Configure the s2m full line write feature */
int al_udma_s2m_full_line_write_set(struct al_udma *umda, al_bool enable);
/* Configure S2M UDMA completion */
int al_udma_s2m_completion_set(struct al_udma *udma,
struct al_udma_s2m_completion_conf *conf);
/* Configure the M2S UDMA scheduling mode */
int al_udma_m2s_sc_set(struct al_udma *udma,
struct al_udma_m2s_dwrr_conf *sched);
/* Configure the M2S UDMA rate limitation */
int al_udma_m2s_rlimit_set(struct al_udma *udma,
struct al_udma_m2s_rlimit_mode *mode);
int al_udma_m2s_rlimit_reset(struct al_udma *udma);
/* Configure the M2S Stream rate limitation */
int al_udma_m2s_strm_rlimit_set(struct al_udma *udma,
struct al_udma_m2s_rlimit_cfg *conf);
int al_udma_m2s_strm_rlimit_act(struct al_udma *udma,
enum al_udma_m2s_rlimit_action act);
/* Configure the M2S UDMA Q rate limitation */
int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q,
struct al_udma_m2s_rlimit_cfg *conf);
int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q,
enum al_udma_m2s_rlimit_action act);
/* Configure the M2S UDMA Q scheduling mode */
int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q,
struct al_udma_m2s_q_dwrr_conf *conf);
int al_udma_m2s_q_sc_pause(struct al_udma_q *udma_q, al_bool set);
int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q);
/* M2S UDMA completion and application timeouts */
int al_udma_m2s_comp_timeouts_set(struct al_udma *udma,
struct al_udma_m2s_comp_timeouts *conf);
int al_udma_m2s_comp_timeouts_get(struct al_udma *udma,
struct al_udma_m2s_comp_timeouts *conf);
/* UDMA get revision */
static INLINE unsigned int al_udma_get_revision(struct unit_regs __iomem *unit_regs)
{
	uint32_t revision;

	/* read the generic DMA revision register once */
	revision = al_reg_read32(&unit_regs->gen.dma_misc.revision);

	/* extract the programming-id field */
	return (revision & UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_MASK) >>
		UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_SHIFT;
}
/**
* S2M UDMA Configure the expected behavior of Rx/S2M UDMA when there are no Rx Descriptors.
*
* @param udma
* @param drop_packet when set to true, the UDMA will drop packet.
* @param gen_interrupt when set to true, the UDMA will generate
* no_desc_hint interrupt when a packet received and the UDMA
* doesn't find enough free descriptors for it.
* @param wait_for_desc_timeout timeout in SB cycles to wait for new
* descriptors before dropping the packets.
* Notes:
* - The hint interrupt is raised immediately without waiting
* for new descs.
* - value 0 means wait for ever.
*
* Notes:
* - When get_interrupt is set, the API won't program the iofic to unmask this
* interrupt, in this case the callee should take care for doing that unmask
* using the al_udma_iofic_config() API.
*
* - The hardware's default configuration is: no drop packet, generate hint
* interrupt.
* - This API must be called once and before enabling the UDMA
*
* @return 0 if no error found.
*/
int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet, al_bool gen_interrupt, uint32_t wait_for_desc_timeout);
/**
* S2M UDMA configure a queue's completion update
*
* @param q_udma
* @param enable set to true to enable completion update
*
* completion update better be disabled for tx queues as those descriptors
* doesn't carry useful information, thus disabling it saves DMA accesses.
*
* @return 0 if no error found.
*/
int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable);
/**
* S2M UDMA configure a queue's completion descriptors coalescing
*
* @param q_udma
* @param enable set to true to enable completion coalescing
* @param coal_timeout in South Bridge cycles.
*
* @return 0 if no error found.
*/
int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable, uint32_t coal_timeout);
/**
 * S2M UDMA configure completion descriptors write burst parameters
 *
 * @param udma
 * @param burst_size completion descriptors write burst size in bytes.
 *
 * @return 0 if no error found.
 */
int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma,
	uint16_t burst_size);
/**
* S2M UDMA configure a queue's completion header split
*
* @param q_udma
* @param enable set to true to enable completion header split
* @param force_hdr_split the header split length will be taken from the queue configuration
* @param hdr_len header split length.
*
* @return 0 if no error found.
*/
int al_udma_s2m_q_compl_hdr_split_config(struct al_udma_q *udma_q,
al_bool enable,
al_bool force_hdr_split,
uint32_t hdr_len);
/* S2M UDMA per queue completion configuration */
int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
struct al_udma_s2m_q_comp_conf *conf);
/** UDMA VMID control configuration */
void al_udma_gen_vmid_conf_set(
struct unit_regs __iomem *unit_regs,
struct al_udma_gen_vmid_conf *conf);
/** UDMA VMID MSIX control configuration */
void al_udma_gen_vmid_msix_conf_set(
struct unit_regs __iomem *unit_regs,
struct al_udma_gen_vmid_msix_conf *conf);
/** UDMA VMID control advanced Tx queue configuration */
void al_udma_gen_vmid_advanced_tx_q_conf(
struct al_udma_q *q,
struct al_udma_gen_vmid_advanced_tx_q_conf *conf);
/** UDMA VMID control advanced Rx queue configuration */
void al_udma_gen_vmid_advanced_rx_q_conf(
struct al_udma_q *q,
struct al_udma_gen_vmid_advanced_rx_q_conf *conf);
/** UDMA header split buffer 2 Rx queue configuration */
void al_udma_gen_hdr_split_buff2_rx_q_conf(
struct al_udma_q *q,
struct al_udma_gen_hdr_split_buff2_q_conf *conf);
/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
/** @} end of UDMA config group */
#endif /* __AL_HAL_UDMA_CONFIG_H__ */

View file

@ -0,0 +1,497 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @file al_hal_udma_debug.c
*
* @brief Universal DMA HAL driver for debug
*
*/
#define DEBUG
#include <al_hal_common.h>
#include <al_hal_udma_regs.h>
#include <al_hal_udma_debug.h>
/*
 * Dump the M2S AXI configuration registers (plus selected fields of
 * desc_wr_cfg_1) to the debug log via the AL_UDMA_PRINT_REG* helpers.
 */
static void al_udma_regs_m2s_axi_print(struct al_udma *udma)
{
	al_dbg("M2S AXI regs:\n");
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, comp_wr_cfg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, comp_wr_cfg_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_3);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_wr_cfg_1);
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, axi_m2s,
			desc_wr_cfg_1,
			max_axi_beats,
			UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS);
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, axi_m2s,
			desc_wr_cfg_1,
			min_axi_beats,
			UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, ostand_cfg);
}
/* Dump the M2S (TX) general configuration/status registers. */
static void al_udma_regs_m2s_general_print(struct al_udma *udma)
{
	al_dbg("M2S general regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, state);
	/* state sub-fields */
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
			comp_ctrl,
			UDMA_M2S_STATE_COMP_CTRL);
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
			stream_if,
			UDMA_M2S_STATE_STREAM_IF);
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
			rd_ctrl,
			UDMA_M2S_STATE_DATA_RD_CTRL);
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
			desc_pref,
			UDMA_M2S_STATE_DESC_PREF);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, err_log_mask);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_0);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_3);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, data_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, header_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, unack_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, check_en);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, fifo_en);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, cfg_len);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, stream_cfg);
}
/* Dump the M2S (TX) descriptor prefetch / data read registers. */
static void al_udma_regs_m2s_rd_print(struct al_udma *udma)
{
	al_dbg("M2S read regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_3);
	/* desc_pref_cfg_3 sub-fields */
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd,
			desc_pref_cfg_3,
			min_burst_below_thr,
			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR);
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd,
			desc_pref_cfg_3,
			min_burst_above_thr,
			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR);
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd,
			desc_pref_cfg_3,
			pref_thr,
			UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, data_cfg);
}
/* Dump the M2S (TX) DWRR scheduler configuration register. */
static void al_udma_regs_m2s_dwrr_print(struct al_udma *udma)
{
	al_dbg("M2S DWRR regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_dwrr, cfg_sched);
}
/* Dump the M2S (TX) global rate limiter configuration register. */
static void al_udma_regs_m2s_rate_limiter_print(struct al_udma *udma)
{
	al_dbg("M2S rate limiter regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rate_limiter, gen_cfg);
}
/* Dump the M2S (TX) stream-level rate limiter registers. */
static void al_udma_regs_m2s_stream_rate_limiter_print(struct al_udma *udma)
{
	al_dbg("M2S stream rate limiter regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
			rlimit.cfg_1s);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
			rlimit.cfg_cycle);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
			rlimit.cfg_token_size_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
			rlimit.cfg_token_size_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
			rlimit.mask);
}
/* Dump the M2S (TX) completion configuration registers. */
static void al_udma_regs_m2s_comp_print(struct al_udma *udma)
{
	al_dbg("M2S completion regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_1c);
	/* cfg_1c sub-fields */
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c,
			comp_fifo_depth,
			UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH);
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c,
			unack_fifo_depth,
			UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH);
	AL_UDMA_PRINT_REG_BIT(udma, " ", "\n", m2s, m2s_comp, cfg_1c,
			q_promotion,
			UDMA_M2S_COMP_CFG_1C_Q_PROMOTION);
	AL_UDMA_PRINT_REG_BIT(udma, " ", "\n", m2s, m2s_comp, cfg_1c,
			force_rr,
			UDMA_M2S_COMP_CFG_1C_FORCE_RR);
	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c,
			q_free_min,
			UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_coal);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_application_ack);
}
/* Dump the M2S (TX) statistics counter registers. */
static void al_udma_regs_m2s_stat_print(struct al_udma *udma)
{
	al_dbg("M2S statistics regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, cfg_st);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_pkt);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_bytes_low);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_bytes_high);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, prefed_desc);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, comp_pkt);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, comp_desc);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, ack_pkts);
}
/* Dump the M2S (TX) feature registers. */
static void al_udma_regs_m2s_feature_print(struct al_udma *udma)
{
	al_dbg("M2S feature regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_3);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_4);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_5);
}
/*
 * Dump the per-queue M2S (TX) registers for queue 'qid'.
 *
 * The qid is first written to m2s.indirect_ctrl, after which the sel_*
 * registers are read (register names suggest indirect_ctrl selects which
 * queue the sel_* status registers reflect).
 */
static void al_udma_regs_m2s_q_print(struct al_udma *udma, uint32_t qid)
{
	al_dbg("M2S Q[%d] status regs:\n", qid);
	al_reg_write32(&udma->udma_regs->m2s.m2s.indirect_ctrl, qid);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_pref_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_comp_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_rate_limit_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_dwrr_status);

	al_dbg("M2S Q[%d] regs:\n", qid);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], cfg);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrbp_low);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrbp_high);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrl);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrhp);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrtp);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdcp);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrbp_low);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrbp_high);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrhp);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.cfg_1s);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.cfg_cycle);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid],
			rlimit.cfg_token_size_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid],
			rlimit.cfg_token_size_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.mask);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_3);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], comp_cfg);
	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], q_tx_pkt);
}
/* Dump the S2M (RX) AXI configuration registers. */
static void al_udma_regs_s2m_axi_print(struct al_udma *udma)
{
	al_dbg("S2M AXI regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_4);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_5);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, comp_wr_cfg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, comp_wr_cfg_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_3);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_wr_cfg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, ostand_cfg_rd);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, ostand_cfg_wr);
}
/* Dump the S2M (RX) general configuration/status registers. */
static void al_udma_regs_s2m_general_print(struct al_udma *udma)
{
	al_dbg("S2M general regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, state);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, err_log_mask);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_0);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_3);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, s_data_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, s_header_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, axi_data_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, unack_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, check_en);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, fifo_en);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, stream_cfg);
}
/* Dump the S2M (RX) descriptor prefetch registers. */
static void al_udma_regs_s2m_rd_print(struct al_udma *udma)
{
	al_dbg("S2M read regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_3);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_4);
}
static void al_udma_regs_s2m_wr_print(struct al_udma *udma)
{
al_dbg("S2M write regs:\n");
AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_wr, data_cfg_1);
AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_wr, data_cfg_1);
}
/* Dump the S2M (RX) completion configuration registers. */
static void al_udma_regs_s2m_comp_print(struct al_udma *udma)
{
	al_dbg("S2M completion regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_1c);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_2c);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_application_ack);
}
/* Dump the S2M (RX) statistics counter registers. */
static void al_udma_regs_s2m_stat_print(struct al_udma *udma)
{
	al_dbg("S2M statistics regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, drop_pkt);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, rx_bytes_low);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, rx_bytes_high);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, prefed_desc);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, comp_pkt);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, comp_desc);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, ack_pkts);
}
/* Dump the S2M (RX) feature registers. */
static void al_udma_regs_s2m_feature_print(struct al_udma *udma)
{
	al_dbg("S2M feature regs:\n");

	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_1);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_3);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_4);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_5);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_6);
}
/*
 * Dump the per-queue S2M (RX) registers for queue 'qid'.
 *
 * Fix: this is the S2M dump, so the queue selector must be written to the
 * S2M indirect_ctrl register before reading the S2M sel_* status
 * registers; the original wrote the M2S one (copy/paste from
 * al_udma_regs_m2s_q_print), so the sel_* reads reflected an unselected
 * queue.
 */
static void al_udma_regs_s2m_q_print(struct al_udma *udma, uint32_t qid)
{
	al_dbg("S2M Q[%d] status regs:\n", qid);
	al_reg_write32(&udma->udma_regs->s2m.s2m.indirect_ctrl, qid);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, sel_pref_fifo_status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, sel_comp_fifo_status);

	al_dbg("S2M Q[%d] regs:\n", qid);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], cfg);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], status);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrbp_low);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrbp_high);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrl);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrhp);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrtp);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdcp);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrbp_low);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrbp_high);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrhp);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrhp_internal);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], comp_cfg);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], comp_cfg_2);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], pkt_cfg);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], qos_cfg);
	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], q_rx_pkt);
}
void al_udma_regs_print(struct al_udma *udma, unsigned int mask)
{
uint32_t i;
if (!udma)
return;
if (udma->type == UDMA_TX) {
if (mask & AL_UDMA_DEBUG_AXI)
al_udma_regs_m2s_axi_print(udma);
if (mask & AL_UDMA_DEBUG_GENERAL)
al_udma_regs_m2s_general_print(udma);
if (mask & AL_UDMA_DEBUG_READ)
al_udma_regs_m2s_rd_print(udma);
if (mask & AL_UDMA_DEBUG_DWRR)
al_udma_regs_m2s_dwrr_print(udma);
if (mask & AL_UDMA_DEBUG_RATE_LIMITER)
al_udma_regs_m2s_rate_limiter_print(udma);
if (mask & AL_UDMA_DEBUG_STREAM_RATE_LIMITER)
al_udma_regs_m2s_stream_rate_limiter_print(udma);
if (mask & AL_UDMA_DEBUG_COMP)
al_udma_regs_m2s_comp_print(udma);
if (mask & AL_UDMA_DEBUG_STAT)
al_udma_regs_m2s_stat_print(udma);
if (mask & AL_UDMA_DEBUG_FEATURE)
al_udma_regs_m2s_feature_print(udma);
for (i = 0; i < DMA_MAX_Q; i++) {
if (mask & AL_UDMA_DEBUG_QUEUE(i))
al_udma_regs_m2s_q_print(udma, i);
}
} else {
if (mask & AL_UDMA_DEBUG_AXI)
al_udma_regs_s2m_axi_print(udma);
if (mask & AL_UDMA_DEBUG_GENERAL)
al_udma_regs_s2m_general_print(udma);
if (mask & AL_UDMA_DEBUG_READ)
al_udma_regs_s2m_rd_print(udma);
if (mask & AL_UDMA_DEBUG_WRITE)
al_udma_regs_s2m_wr_print(udma);
if (mask & AL_UDMA_DEBUG_COMP)
al_udma_regs_s2m_comp_print(udma);
if (mask & AL_UDMA_DEBUG_STAT)
al_udma_regs_s2m_stat_print(udma);
if (mask & AL_UDMA_DEBUG_FEATURE)
al_udma_regs_s2m_feature_print(udma);
for (i = 0; i < DMA_MAX_Q; i++) {
if (mask & AL_UDMA_DEBUG_QUEUE(i))
al_udma_regs_s2m_q_print(udma, i);
}
}
}
/*
 * Dump the software state (struct al_udma_q fields) of queue 'qid'.
 * No-op when udma is NULL or qid is out of range.
 */
void al_udma_q_struct_print(struct al_udma *udma, uint32_t qid)
{
	struct al_udma_q *queue;

	if (!udma)
		return;

	if (qid >= DMA_MAX_Q)
		return;

	queue = &udma->udma_q[qid];

	al_dbg("Q[%d] struct:\n", qid);
	al_dbg(" size_mask = 0x%08x\n", (uint32_t)queue->size_mask);
	al_dbg(" q_regs = %p\n", queue->q_regs);
	al_dbg(" desc_base_ptr = %p\n", queue->desc_base_ptr);
	al_dbg(" next_desc_idx = %d\n", (uint16_t)queue->next_desc_idx);
	al_dbg(" desc_ring_id = %d\n", (uint32_t)queue->desc_ring_id);
	al_dbg(" cdesc_base_ptr = %p\n", queue->cdesc_base_ptr);
	al_dbg(" cdesc_size = %d\n", (uint32_t)queue->cdesc_size);
	al_dbg(" next_cdesc_idx = %d\n", (uint16_t)queue->next_cdesc_idx);
	al_dbg(" end_cdesc_ptr = %p\n", queue->end_cdesc_ptr);
	al_dbg(" comp_head_idx = %d\n", (uint16_t)queue->comp_head_idx);
	al_dbg(" comp_head_ptr = %p\n", queue->comp_head_ptr);
	al_dbg(" pkt_crnt_descs = %d\n", (uint32_t)queue->pkt_crnt_descs);
	al_dbg(" comp_ring_id = %d\n", (uint32_t)queue->comp_ring_id);
	/*
	 * NOTE(review): "%016llx" with a uint64_t argument assumes uint64_t
	 * is unsigned long long sized on this platform - confirm against
	 * al_dbg's printf semantics.
	 */
	al_dbg(" desc_phy_base = 0x%016llx\n", (uint64_t)queue->desc_phy_base);
	al_dbg(" cdesc_phy_base = 0x%016llx\n",
			(uint64_t)queue->cdesc_phy_base);
	al_dbg(" flags = 0x%08x\n", (uint32_t)queue->flags);
	al_dbg(" size = %d\n", (uint32_t)queue->size);
	al_dbg(" status = %d\n", (uint32_t)queue->status);
	al_dbg(" udma = %p\n", queue->udma);
	al_dbg(" qid = %d\n", (uint32_t)queue->qid);
}
/*
 * Dump the raw entries of a queue's submission or completion ring.
 *
 * @param udma udma data structure (NULL is tolerated: no-op)
 * @param qid queue index
 * @param rtype AL_RING_SUBMISSION or AL_RING_COMPLETION
 *
 * Fix: the original computed each entry address as
 * "(void*)((uint32_t)base_ptr + i * desc_size)", which truncates the
 * pointer to 32 bits and dereferences a bogus address on any 64-bit
 * platform. The arithmetic is now done on a char pointer.
 */
void al_udma_ring_print(struct al_udma *udma, uint32_t qid,
		enum al_udma_ring_type rtype)
{
	struct al_udma_q *queue;
	uint32_t desc_size;
	void *base_ptr;
	uint32_t i;

	if (!udma)
		return;

	if (qid >= DMA_MAX_Q)
		return;

	queue = &udma->udma_q[qid];

	if (rtype == AL_RING_SUBMISSION) {
		base_ptr = queue->desc_base_ptr;
		desc_size = sizeof(union al_udma_desc);
		if (base_ptr)
			al_dbg("Q[%d] submission ring pointers:\n", qid);
		else {
			al_dbg("Q[%d] submission ring is not allocated\n", qid);
			return;
		}
	} else {
		base_ptr = queue->cdesc_base_ptr;
		desc_size = queue->cdesc_size;
		if (base_ptr)
			al_dbg("Q[%d] completion ring pointers:\n", qid);
		else {
			al_dbg("Q[%d] completion ring is not allocated\n", qid);
			return;
		}
	}

	for (i = 0; i < queue->size; i++) {
		/* 64-bit-safe byte offset from the ring base */
		uint32_t *curr_addr =
			(uint32_t *)((char *)base_ptr + i * desc_size);

		if (desc_size == 16)
			al_dbg("[%04d](%p): %08x %08x %08x %08x\n",
					i,
					curr_addr,
					(uint32_t)*curr_addr,
					(uint32_t)*(curr_addr+1),
					(uint32_t)*(curr_addr+2),
					(uint32_t)*(curr_addr+3));
		else if (desc_size == 8)
			al_dbg("[%04d](%p): %08x %08x\n",
					i,
					curr_addr,
					(uint32_t)*curr_addr,
					(uint32_t)*(curr_addr+1));
		else if (desc_size == 4)
			al_dbg("[%04d](%p): %08x\n",
					i,
					curr_addr,
					(uint32_t)*curr_addr);
		else
			/* unexpected descriptor size - stop dumping */
			break;
	}
}

View file

@ -0,0 +1,134 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @defgroup group_udma_debug UDMA Debug
* @ingroup group_udma_api
* UDMA Debug
* @{
* @file al_hal_udma_debug.h
*
* @brief C Header file for the Universal DMA HAL driver for debug APIs
*
*/
#ifndef __AL_HAL_UDMA_DEBUG_H__
#define __AL_HAL_UDMA_DEBUG_H__
#include <al_hal_udma.h>
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
/* UDMA register print helper macros */

/* Print a whole register: "<PREFIX><reg name> = 0x<value><POSTFIX>" */
#define AL_UDMA_PRINT_REG(UDMA, PREFIX, POSTFIX, TYPE, GROUP, REG) \
	al_dbg(PREFIX #REG " = 0x%08x" POSTFIX, al_reg_read32( \
			&(UDMA->udma_regs->TYPE.GROUP.REG)))

/*
 * Print a multi-bit register field.
 * Fix: the mask must be applied BEFORE the shift. The original expanded
 * to "val & (MASK >> SHIFT)" because '>>' binds tighter than '&' in C,
 * which extracts the wrong bits for any field with a non-zero shift.
 */
#define AL_UDMA_PRINT_REG_FIELD( \
		UDMA, PREFIX, POSTFIX, FMT, TYPE, GROUP, REG, LBL, FIELD) \
	al_dbg(PREFIX #LBL " = " FMT POSTFIX, \
			(al_reg_read32(&(UDMA->udma_regs->TYPE.GROUP.REG)) \
			& FIELD ## _MASK) >> FIELD ## _SHIFT)

/* Print a single-bit register flag as 0 or 1 */
#define AL_UDMA_PRINT_REG_BIT( \
		UDMA, PREFIX, POSTFIX, TYPE, GROUP, REG, LBL, FIELD) \
	al_dbg(PREFIX #LBL " = %d" POSTFIX, ((al_reg_read32( \
			&(UDMA->udma_regs->TYPE.GROUP.REG)) \
			& FIELD) != 0))
/*
 * UDMA register print mask definitions.
 * Bits [0, DMA_MAX_Q) select individual queue dumps; the bits from
 * DMA_MAX_Q upward select whole register groups.
 */
#define AL_UDMA_DEBUG_QUEUE(n)			AL_BIT(n)
#define AL_UDMA_DEBUG_AXI			AL_BIT(DMA_MAX_Q)
#define AL_UDMA_DEBUG_GENERAL			AL_BIT(DMA_MAX_Q + 1)
#define AL_UDMA_DEBUG_READ			AL_BIT(DMA_MAX_Q + 2)
#define AL_UDMA_DEBUG_WRITE			AL_BIT(DMA_MAX_Q + 3)
#define AL_UDMA_DEBUG_DWRR			AL_BIT(DMA_MAX_Q + 4)
#define AL_UDMA_DEBUG_RATE_LIMITER		AL_BIT(DMA_MAX_Q + 5)
#define AL_UDMA_DEBUG_STREAM_RATE_LIMITER	AL_BIT(DMA_MAX_Q + 6)
#define AL_UDMA_DEBUG_COMP			AL_BIT(DMA_MAX_Q + 7)
#define AL_UDMA_DEBUG_STAT			AL_BIT(DMA_MAX_Q + 8)
#define AL_UDMA_DEBUG_FEATURE			AL_BIT(DMA_MAX_Q + 9)
#define AL_UDMA_DEBUG_ALL			0xFFFFFFFF
/* Debug functions */

/**
 * Print udma registers according to the provided mask
 *
 * @param udma udma data structure
 * @param mask mask that specifies which register groups to print
 * e.g. AL_UDMA_DEBUG_AXI prints AXI registers, AL_UDMA_DEBUG_ALL prints all
 * registers
 */
void al_udma_regs_print(struct al_udma *udma, unsigned int mask);

/**
 * Print udma queue software structure
 *
 * @param udma udma data structure
 * @param qid queue index
 */
void al_udma_q_struct_print(struct al_udma *udma, uint32_t qid);

/** UDMA ring type */
enum al_udma_ring_type {
	AL_RING_SUBMISSION,	/**< submission descriptor ring */
	AL_RING_COMPLETION	/**< completion descriptor ring */
};

/**
 * Print the ring entries for the specified queue index and ring type
 * (submission/completion)
 *
 * @param udma udma data structure
 * @param qid queue index
 * @param rtype udma ring type
 */
void al_udma_ring_print(struct al_udma *udma, uint32_t qid,
		enum al_udma_ring_type rtype);
/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
#endif /* __AL_HAL_UDMA_DEBUG_H__ */
/** @} end of UDMA debug group */

View file

@ -0,0 +1,151 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @{
* @file al_hal_udma_iofic.c
*
* @brief unit interrupts configurations
*
*/
#include "al_hal_udma_iofic.h"
#include "al_hal_udma_regs.h"
/*
 * Configure the main (primary) interrupt controller registers.
 * Interrupts are configured but kept masked.
 *
 * Group D is programmed identically in every valid mode, and groups B and
 * C always share one flag set per mode, so the switch below only selects
 * the group A and group B/C flag sets. The OR operand order differs from
 * the original source, but bitwise OR is commutative, so the programmed
 * values are identical.
 */
static int al_udma_main_iofic_config(struct al_iofic_regs __iomem *base,
	enum al_iofic_mode mode)
{
	uint32_t flags_a;
	uint32_t flags_bc;
	const uint32_t flags_d = INT_CONTROL_GRP_SET_ON_POSEDGE |
			INT_CONTROL_GRP_MASK_MSI_X |
			INT_CONTROL_GRP_CLEAR_ON_READ;

	switch (mode) {
	case AL_IOFIC_MODE_LEGACY:
		flags_a = INT_CONTROL_GRP_SET_ON_POSEDGE |
				INT_CONTROL_GRP_MASK_MSI_X |
				INT_CONTROL_GRP_CLEAR_ON_READ;
		flags_bc = INT_CONTROL_GRP_CLEAR_ON_READ |
				INT_CONTROL_GRP_MASK_MSI_X;
		break;
	case AL_IOFIC_MODE_MSIX_PER_Q:
		flags_a = INT_CONTROL_GRP_SET_ON_POSEDGE |
				INT_CONTROL_GRP_AUTO_MASK |
				INT_CONTROL_GRP_AUTO_CLEAR;
		flags_bc = INT_CONTROL_GRP_AUTO_CLEAR |
				INT_CONTROL_GRP_AUTO_MASK |
				INT_CONTROL_GRP_CLEAR_ON_READ;
		break;
	case AL_IOFIC_MODE_MSIX_PER_GROUP:
		flags_a = INT_CONTROL_GRP_SET_ON_POSEDGE |
				INT_CONTROL_GRP_AUTO_CLEAR |
				INT_CONTROL_GRP_AUTO_MASK;
		flags_bc = INT_CONTROL_GRP_CLEAR_ON_READ |
				INT_CONTROL_GRP_MASK_MSI_X;
		break;
	default:
		al_err("%s: invalid mode (%d)\n", __func__, mode);
		return -EINVAL;
	}

	al_iofic_config(base, AL_INT_GROUP_A, flags_a);
	al_iofic_config(base, AL_INT_GROUP_B, flags_bc);
	al_iofic_config(base, AL_INT_GROUP_C, flags_bc);
	al_iofic_config(base, AL_INT_GROUP_D, flags_d);

	al_dbg("%s: base.%p mode %d\n", __func__, base, mode);

	return 0;
}
/*
 * Configure the UDMA interrupt registers; interrupts are kept masked.
 *
 * First programs the primary (main) controller according to 'mode', then
 * unmasks the secondary-controller error cause bits except those set in
 * the *_errors_disable arguments, and passes the *_aborts_disable bits to
 * al_iofic_abort_mask(). Secondary group A holds the M2S (TX) error bits
 * and group B the S2M (RX) error bits.
 */
int al_udma_iofic_config(struct unit_regs __iomem *regs, enum al_iofic_mode mode,
	uint32_t m2s_errors_disable,
	uint32_t m2s_aborts_disable,
	uint32_t s2m_errors_disable,
	uint32_t s2m_aborts_disable)
{
	int rc;

	rc = al_udma_main_iofic_config(&regs->gen.interrupt_regs.main_iofic, mode);
	if (rc != 0)
		return rc;

	al_iofic_unmask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_A, ~m2s_errors_disable);
	al_iofic_abort_mask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_A, m2s_aborts_disable);

	al_iofic_unmask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_B, ~s2m_errors_disable);
	al_iofic_abort_mask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_B, s2m_aborts_disable);

	al_dbg("%s base.%p mode %d\n", __func__, regs, mode);

	return 0;
}
/*
 * Return the address of the unmask register for the given controller
 * level and group.
 */
uint32_t __iomem * al_udma_iofic_unmask_offset_get(
	struct unit_regs __iomem *regs,
	enum al_udma_iofic_level level,
	int group)
{
	void __iomem *iofic_base;

	al_assert(al_udma_iofic_level_and_group_valid(level, group));

	iofic_base = al_udma_iofic_reg_base_get(regs, level);

	return al_iofic_unmask_offset_get(iofic_base, group);
}
/** @} end of UDMA group */

View file

@ -0,0 +1,614 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @defgroup group_udma_interrupts UDMA I/O Fabric Interrupt Controller
* @ingroup group_udma_api
* UDMA IOFIC API
* @{
* @file al_hal_udma_iofic.h
*
* @brief C Header file for programming the interrupt controller that found
* in UDMA based units. These APIs rely and use some the Interrupt controller
* API under al_hal_iofic.h
*/
#ifndef __AL_HAL_UDMA_IOFIC_H__
#define __AL_HAL_UDMA_IOFIC_H__
#include <al_hal_common.h>
#include <al_hal_iofic.h>
#include <al_hal_udma_regs.h>
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
/**
 * Interrupt Mode
 * This is the interrupt mode for the primary interrupt level. The secondary
 * interrupt level does not have a mode and it is always a level sensitive
 * interrupt that is reflected in group D of the primary.
 */
enum al_iofic_mode {
	AL_IOFIC_MODE_LEGACY,		/**< level-sensitive interrupt wire */
	AL_IOFIC_MODE_MSIX_PER_Q,	/**< per UDMA queue MSI-X interrupt */
	AL_IOFIC_MODE_MSIX_PER_GROUP	/**< per interrupt group MSI-X interrupt */
};

/** interrupt controller level (primary/secondary) */
enum al_udma_iofic_level {
	AL_UDMA_IOFIC_LEVEL_PRIMARY,
	AL_UDMA_IOFIC_LEVEL_SECONDARY
};
/*
 * The next four groups represent the standard 4 groups in the primary
 * interrupt controller of each bus-master unit in the I/O Fabric.
 * The first two groups can be used when accessing the secondary interrupt
 * controller as well.
 */
#define AL_INT_GROUP_A		0 /**< summary of the below events */
#define AL_INT_GROUP_B		1 /**< RX completion queues */
#define AL_INT_GROUP_C		2 /**< TX completion queues */
#define AL_INT_GROUP_D		3 /**< Misc */

/*******************************************************************************
 * Primary interrupt controller, group A bits
 ******************************************************************************/
/* Group A bits which are just summary bits of GROUP B, C and D */
#define AL_INT_GROUP_A_GROUP_B_SUM	AL_BIT(0)
#define AL_INT_GROUP_A_GROUP_C_SUM	AL_BIT(1)
#define AL_INT_GROUP_A_GROUP_D_SUM	AL_BIT(2)
/*******************************************************************************
 * MSIX entry indices
 ******************************************************************************/
/** MSIX entry index for summary of group D in group A */
#define AL_INT_MSIX_GROUP_A_SUM_D_IDX	2
/** MSIX entry index for RX completion queue 0 */
#define AL_INT_MSIX_RX_COMPLETION_START	3

/*******************************************************************************
 * Primary interrupt controller, group D bits
 ******************************************************************************/
/* Inter-processor cross mailbox interrupts */
#define AL_INT_GROUP_D_CROSS_MAIL_BOXES	\
	(AL_BIT(0) | AL_BIT(1) | AL_BIT(2) | AL_BIT(3))
/** Summary of secondary interrupt controller, group A */
#define AL_INT_GROUP_D_M2S	AL_BIT(8)
/** Summary of secondary interrupt controller, group B */
#define AL_INT_GROUP_D_S2M	AL_BIT(9)
#define AL_INT_GROUP_D_SW_TIMER_INT	AL_BIT(10)
#define AL_INT_GROUP_D_APP_EXT_INT	AL_BIT(11)
/*
 * All group D interrupt bits.
 * Fix: the expansion is now parenthesized. Without the outer parentheses,
 * an expression such as "x & AL_INT_GROUP_D_ALL" parsed as
 * "(x & AL_INT_GROUP_D_CROSS_MAIL_BOXES) | AL_INT_GROUP_D_M2S | ..."
 * because '&' binds tighter than '|'.
 */
#define AL_INT_GROUP_D_ALL \
	(AL_INT_GROUP_D_CROSS_MAIL_BOXES | \
	AL_INT_GROUP_D_M2S | \
	AL_INT_GROUP_D_S2M | \
	AL_INT_GROUP_D_SW_TIMER_INT | \
	AL_INT_GROUP_D_APP_EXT_INT)
/*
 * Until this point, all description above is for Groups A/B/C/D in the PRIMARY
 * Interrupt controller.
 * Following are definitions related to the secondary interrupt controller with
 * two cause registers (group A and group B) that cover UDMA M2S/S2M errors.
 * Secondary interrupt controller summary bits are not mapped to the Processor
 * GIC directly, rather they are represented in Group D of the primary interrupt
 * controller.
 */

/******************************************************************************
 * Secondary interrupt Controller, Group A, which holds the TX (M2S) error
 * interrupt bits
 ******************************************************************************/

/**
 * MSIx response
 * MSIX Bus generator response error, the Bus response received with error indication
 */
#define AL_INT_2ND_GROUP_A_M2S_MSIX_RESP	AL_BIT(27)
/**
 * MSIx timeout MSIX Bus generator timeout error.
 * The generator didn't receive bus response for the MSIx write transaction.
 */
#define AL_INT_2ND_GROUP_A_M2S_MSIX_TO		AL_BIT(26)
/** Prefetch header buffer parity error */
#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_HDR_PARITY	AL_BIT(25)
/** Prefetch descriptor buffer parity error */
#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_DESC_PARITY	AL_BIT(24)
/** Data buffer parity error */
#define AL_INT_2ND_GROUP_A_M2S_DATA_PARITY	AL_BIT(23)
/** Data header buffer parity error */
#define AL_INT_2ND_GROUP_A_M2S_HDR_PARITY	AL_BIT(22)
/** Completion coalescing buffer parity error */
#define AL_INT_2ND_GROUP_A_M2S_COMPL_COAL_PARITY	AL_BIT(21)
/** UNACK packets buffer parity error */
#define AL_INT_2ND_GROUP_A_M2S_UNACK_PKT_PARITY	AL_BIT(20)
/** ACK packets buffer parity error */
#define AL_INT_2ND_GROUP_A_M2S_ACK_PKT_PARITY	AL_BIT(19)
/** AXI data buffer parity error */
#define AL_INT_2ND_GROUP_A_M2S_AX_DATA_PARITY	AL_BIT(18)
/**
 * Prefetch Ring ID error
 * A wrong RingId was received while prefetching submission descriptor. This
 * could indicate a software bug or hardware failure, unless the UDMA is
 * working in a mode to ignore RingId (the al_udma_iofic_config() API can be
 * used to configure the UDMA to ignore the Ring ID check)
 */
#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_RING_ID	AL_BIT(17)
/**
 * Prefetch last
 * Error in last bit indication of the descriptor.
 * Descriptor with Last bit asserted is read from the queue to the prefetch
 * FIFO when the prefetch engine is not in a middle of packet processing (a
 * descriptor with First bit asserted should be read first to indicate start of
 * packet)
 */
#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_LAST	AL_BIT(16)
/**
 * Prefetch first
 * Error in first bit indication of the descriptor.
 * Descriptor with First bit asserted is read from the queue to the prefetch
 * FIFO while the prefetch engine is in a middle of packet processing (a
 * descriptor with Last bit asserted should be read to indicate end of packet
 * before starting a new one)
 */
#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_FIRST	AL_BIT(15)
/**
 * Prefetch max descriptors
 * Number of descriptors per packet exceeds the configurable maximum
 * descriptors per packet. This could indicate a software bug or a hardware
 * failure. (The al_udma_m2s_max_descs_set() API is used to configure the
 * maximum descriptors per packet)
 */
#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_MAX_DESC	AL_BIT(14)
/**
 * Packet length
 * Packet length exceeds the configurable maximum packet size. (The
 * al_udma_m2s_packet_size_cfg_set() API is used to configure the maximum
 * packet size)
 */
#define AL_INT_2ND_GROUP_A_M2S_PKT_LEN		AL_BIT(13)
/**
 * Prefetch AXI timeout
 * Bus request to I/O Fabric timeout error
 */
#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_TO	AL_BIT(12)
/**
 * Prefetch AXI response
 * Bus response from I/O Fabric error
 */
#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_RESP	AL_BIT(11)
/**
 * Prefetch AXI parity
 * Bus parity error on descriptor being prefetched
 */
#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_PARITY	AL_BIT(10)
/**
 * Data AXI timeout
 * Bus request to I/O Fabric timeout error
 */
#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_TO	AL_BIT(9)
/**
 * Data AXI response
 * Bus response from I/O Fabric error
 */
#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_RESP	AL_BIT(8)
/**
* Data AXI parity
* Bus parity error on data being read
*/
#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_PARITY AL_BIT(7)
/**
* Completion AXI timeout
* Bus request to I/O Fabric timeout error
*/
#define AL_INT_2ND_GROUP_A_M2S_CONPL_AXI_TO AL_BIT(6)
/**
* Completion AXI response
* Bus response from I/O Fabric error
*/
#define AL_INT_2ND_GROUP_A_M2S_COMPL_AXI_RESP AL_BIT(5)
/**
* Completion AXI parity
* Bus generator internal SRAM parity error
*/
#define AL_INT_2ND_GROUP_A_M2S_COMP_AXI_PARITY AL_BIT(4)
/**
* Stream timeout
* Application stream interface timeout indicating a failure at the Application
* layer (RAID, Ethernet etc)
*/
#define AL_INT_2ND_GROUP_A_M2S_STRM_TO AL_BIT(3)
/**
* Stream response
* Application stream interface response error indicating a failure at the
* Application layer (RAID, Ethernet etc)
*/
#define AL_INT_2ND_GROUP_A_M2S_STRM_RESP AL_BIT(2)
/**
* Stream parity
* Application stream interface parity error indicating a failure at the
* Application layer (RAID, Ethernet etc)
*/
#define AL_INT_2ND_GROUP_A_M2S_STRM_PARITY AL_BIT(1)
/**
* Stream completion mismatch
* Application stream interface, packet serial mismatch error indicating a
* failure at the Application layer (RAID, Ethernet etc)
*/
#define AL_INT_2ND_GROUP_A_M2S_STRM_COMPL_MISMATCH AL_BIT(0)
/*******************************************************************************
* Secondary interrupt Controller, Group B, which holds the RX (S2M) error
* interrupt bits
******************************************************************************/
/** Prefetch descriptor buffer parity error */
#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_DESC_PARITY AL_BIT(30)
/** Completion coalescing buffer parity error */
#define AL_INT_2ND_GROUP_B_S2M_COMPL_COAL_PARITY AL_BIT(29)
/** PRE-UNACK packets buffer parity error */
#define AL_INT_2ND_GROUP_B_S2M_PRE_UNACK_PKT_PARITY AL_BIT(28)
/** UNACK packets buffer parity error */
#define AL_INT_2ND_GROUP_B_S2M_UNACK_PKT_PARITY AL_BIT(27)
/** Data buffer parity error */
#define AL_INT_2ND_GROUP_B_S2M_DATA_PARITY AL_BIT(26)
/** Data header buffer parity error */
#define AL_INT_2ND_GROUP_B_S2M_DATA_HDR_PARITY AL_BIT(25)
/**
* Packet length
* Application stream interface, Data counter length mismatch with metadata
* packet length indicating a failure at the Application layer (RAID, Ethernet
* etc)
*/
#define AL_INT_2ND_GROUP_B_S2M_PKT_LEN AL_BIT(24)
/**
* Stream last
* Application stream interface, error in Last bit indication, this error is
* asserted when a 'last' indication is asserted on the stream interface
* (between the application and the UDMA) when the interface is not in the
* middle of packet, meaning that there was no 'first' indication before. This
* indicates a failure at the application layer.
*/
#define AL_INT_2ND_GROUP_B_S2M_STRM_LAST AL_BIT(23)
/**
* Stream first
* Application stream interface error in first bit indication, this error is
* asserted when a 'first' indication is asserted on the stream interface
* (between the application and the UDMA) when the interface is in the middle
* of packet, meaning that there was a 'first' indication before and the UDMA
* is waiting for a 'last' indication to end the packet. This indicates a
* failure at the application layer.
*/
#define AL_INT_2ND_GROUP_B_S2M_STRM_FIRST AL_BIT(22)
/**
* Stream data
* Application stream interface, error indication during data transaction
*/
#define AL_INT_2ND_GROUP_B_S2M_STRM_DATA AL_BIT(21)
/**
* Stream Data parity
* Application stream interface, parity error during data transaction
*/
#define AL_INT_2ND_GROUP_B_S2M_STRM_DATA_PARITY AL_BIT(20)
/**
* Stream Header error
* Application stream interface, error indication during header transaction
*/
#define AL_INT_2ND_GROUP_B_S2M_STRM_HDR AL_BIT(19)
/**
* Stream Header parity
* Application stream interface, parity error during header transaction
*/
#define AL_INT_2ND_GROUP_B_S2M_STRM_HDR_PARITY AL_BIT(18)
/**
* Completion UNACK
* Completion write, UNACK timeout due to completion FIFO back pressure
*/
#define AL_INT_2ND_GROUP_B_S2M_COMPL_UNACK AL_BIT(17)
/**
* Completion stream
* Completion write, UNACK timeout due to stream ACK FIFO back pressure
*/
#define AL_INT_2ND_GROUP_B_S2M_COMPL_STRM AL_BIT(16)
/**
* Completion AXI timeout
* Bus request to I/O Fabric timeout error
*/
#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_TO AL_BIT(15)
/**
* Completion AXI response
* Bus response from I/O Fabric error
*/
#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_RESP AL_BIT(14)
/**
* Completion AXI parity
* Completion Bus generator internal SRAM parity error
*/
#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_PARITY AL_BIT(13)
/**
* Prefetch saturate
* Prefetch engine, packet length counter saturated (32 bit) , this is caused
* by an error at the application layer which sends packet data without
* 'last'/'first' indication.
*/
#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_SAT AL_BIT(12)
/**
* Prefetch ring ID
* Prefetch engine, Ring ID is not matching the expected RingID. This could
* indicate a software bug or hardware failure, unless the UDMA is working in a
* mode to ignore RingId (the al_udma_iofic_config() API can be used to
* configure the UDMA to ignore the Ring ID check)
*/
#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_RING_ID AL_BIT(11)
/**
* Prefetch AXI timeout
* Bus request to I/O Fabric timeout error
*/
#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_TO AL_BIT(10)
/**
* Prefetch AXI response
* Bus response from I/O Fabric error
*/
#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_RESP AL_BIT(9)
/**
* Prefetch AXI parity
* Bus parity error on descriptor being prefetched
*/
#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_PARITY AL_BIT(8)
/**
* No descriptors hint
* Data write, Hint to the SW that there are not enough descriptors in the
* queue for the current received packet. This is considered a hint and not an
* error, as it could be a normal situation in certain application. The S2M
* UDMA behavior when it runs out of Rx Descriptor is controlled by driver
* which can use this hint to add more descriptors to the Rx queue.
*/
#define AL_INT_2ND_GROUP_B_S2M_NO_DESC_HINT AL_BIT(7)
/**
* No descriptors timeout
* Data write, Timeout indication when there are not enough descriptors for the
* current packet and the timeout expires. The S2M UDMA behavior when it runs
* out of Rx Descriptor is controlled by driver which can use this hint to add
* more descriptors to the Rx queue. The al_udma_s2m_no_desc_cfg_set() is used
to configure the UDMA S2M timeout and behavior when there are no Rx
* descriptors for the received packet.
*/
#define AL_INT_2ND_GROUP_B_S2M_NO_DESC_TO AL_BIT(6)
/**
* Promotion indication
* Data write, the data write engine checks the queue number of the two packets
* at the head of the data FIFO, the data write engine notify the prefetch
* engine to promote these queue numbers in the prefetch scheduler to make sure
* that these queue will have RX descriptors for these packets. This error
* indicates that the prefetch promotion didn't work for the second packet in
* the FIFO. This is an indication used for system debug and not an error.
*/
#define AL_INT_2ND_GROUP_B_S2M_PROM_IND AL_BIT(5)
/**
* Header split ignored
* Data write, The application requested header split but the buffer descriptor
* doesn't include a second buffer for the header
*/
#define AL_INT_2ND_GROUP_B_S2M_HDR_SPLT_IGNORED AL_BIT(4)
/**
* Header split length
* Data write, The application requested header split and the length of the
* second buffer allocated for the header is not enough for the requested
* header length. The remaining of the header is written to buffer 1 (data
* buffer).
*/
#define AL_INT_2ND_GROUP_B_S2M_HDR_SPLT_LEN AL_BIT(3)
/**
* Data AXI timeout
* Bus request to I/O Fabric timeout error
*/
#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_TO AL_BIT(2)
/**
* Data AXI response
* Bus response from I/O Fabric error
*/
#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_RESP AL_BIT(1)
/**
* Data AXI parity
* Bus parity error on data being read
*/
#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_PARITY AL_BIT(0)
/*******************************************************************************
* Configurations
******************************************************************************/
/**
* Configure the UDMA interrupt controller registers; interrupts are left
* masked.
* This is a static setting that should be called while initializing the
* interrupt controller within a given UDMA, and should not be modified during
* runtime unless the UDMA is completely disabled. The first argument sets the
* interrupt and MSIX modes. The m2s/s2m errors/abort are a set of bit-wise
* masks to define the behaviour of the UDMA once an error happens: The _abort
* will put the UDMA in abort state once an error happens The _error bitmask
* will indicate and error in the secondary cause register but will not abort.
* The bit-mask that the _errors_disable and _aborts_disable are described in
* 'AL_INT_2ND_GROUP_A_*' and 'AL_INT_2ND_GROUP_B_*'
*
* @param regs pointer to unit registers
* @param mode interrupt scheme mode (legacy, MSI-X..)
* @param m2s_errors_disable
* This is a bit-wise mask, to indicate which one of the error causes in
* secondary interrupt group_A should generate an interrupt. When a bit is
* set, the error cause is ignored.
* Recommended value: 0 (enable all errors).
* @param m2s_aborts_disable
* This is a bit-wise mask, to indicate which one of the error causes in
* secondary interrupt group_A should automatically put the UDMA in
abort state. When a bit is set, the error cause does not cause an abort.
* Recommended value: 0 (enable all aborts).
* @param s2m_errors_disable
* This is a bit-wise mask, to indicate which one of the error causes in
* secondary interrupt group_A should generate an interrupt. When a bit is
* set, the error cause is ignored.
* Recommended value: 0xE0 (disable hint errors).
* @param s2m_aborts_disable
* This is a bit-wise mask, to indicate which one of the error causes in
* secondary interrupt group_A should automatically put the UDMA in
abort state. When a bit is set, the error cause does not cause an abort.
* Recommended value: 0xE0 (disable hint aborts).
*
* @return 0 on success. -EINVAL otherwise.
*/
int al_udma_iofic_config(struct unit_regs __iomem *regs,
enum al_iofic_mode mode,
uint32_t m2s_errors_disable,
uint32_t m2s_aborts_disable,
uint32_t s2m_errors_disable,
uint32_t s2m_aborts_disable);
/**
* return the offset of the unmask register for a given group.
* this function can be used when the upper layer wants to directly
* access the unmask register and bypass the al_udma_iofic_unmask() API.
*
* @param regs pointer to udma registers
* @param level the interrupt controller level (primary / secondary)
* @param group the interrupt group ('AL_INT_GROUP_*')
* @return the offset of the unmask register.
*/
uint32_t __iomem * al_udma_iofic_unmask_offset_get(
struct unit_regs __iomem *regs,
enum al_udma_iofic_level level,
int group);
/**
 * Get the interrupt controller base address for either the primary or
 * secondary interrupt controller
 *
 * @param regs pointer to udma unit registers
 * @param level the interrupt controller level (primary / secondary)
 *
 * @returns The interrupt controller base address
 *
 */
static INLINE void __iomem *al_udma_iofic_reg_base_get(
	struct unit_regs __iomem *regs,
	enum al_udma_iofic_level level)
{
	if (level == AL_UDMA_IOFIC_LEVEL_PRIMARY)
		return (void __iomem *)&regs->gen.interrupt_regs.main_iofic;

	return (void __iomem *)&regs->gen.interrupt_regs.secondary_iofic_ctrl;
}
/**
 * Check the interrupt controller level/group validity
 *
 * @param level the interrupt controller level (primary / secondary)
 * @param group the interrupt group ('AL_INT_GROUP_*')
 *
 * @returns 0 - invalid, 1 - valid
 *
 */
static INLINE int al_udma_iofic_level_and_group_valid(
	enum al_udma_iofic_level level,
	int group)
{
	int num_groups;

	/* The primary controller exposes 4 groups (A-D), the secondary 2 */
	switch (level) {
	case AL_UDMA_IOFIC_LEVEL_PRIMARY:
		num_groups = 4;
		break;
	case AL_UDMA_IOFIC_LEVEL_SECONDARY:
		num_groups = 2;
		break;
	default:
		return 0;
	}

	return (group >= 0) && (group < num_groups);
}
/**
 * Unmask specific interrupts for a given group
 *
 * This function uses the interrupt mask-clear register, which guarantees
 * atomicity, so it is safe to call even while the mask is being changed by
 * the HW (auto mask) or by another cpu.
 *
 * @param regs pointer to udma unit registers
 * @param level the interrupt controller level (primary / secondary)
 * @param group the interrupt group ('AL_INT_GROUP_*')
 * @param mask bitwise of interrupts to unmask, set bits will be unmasked.
 */
static INLINE void al_udma_iofic_unmask(
	struct unit_regs __iomem *regs,
	enum al_udma_iofic_level level,
	int group,
	uint32_t mask)
{
	void __iomem *iofic_base;

	al_assert(al_udma_iofic_level_and_group_valid(level, group));

	iofic_base = al_udma_iofic_reg_base_get(regs, level);
	al_iofic_unmask(iofic_base, group, mask);
}
/**
 * Mask specific interrupts for a given group
 *
 * This function modifies the interrupt mask register (read-modify-write in
 * HW), so the caller must make sure the mask is not changed concurrently by
 * another cpu.
 *
 * @param regs pointer to udma unit registers
 * @param level the interrupt controller level (primary / secondary)
 * @param group the interrupt group ('AL_INT_GROUP_*')
 * @param mask bitwise of interrupts to mask, set bits will be masked.
 */
static INLINE void al_udma_iofic_mask(
	struct unit_regs __iomem *regs,
	enum al_udma_iofic_level level,
	int group,
	uint32_t mask)
{
	void __iomem *iofic_base;

	al_assert(al_udma_iofic_level_and_group_valid(level, group));

	iofic_base = al_udma_iofic_reg_base_get(regs, level);
	al_iofic_mask(iofic_base, group, mask);
}
/**
 * Read the interrupt cause register for a given group
 *
 * This clears the set bits if Clear-on-Read mode is enabled.
 *
 * @param regs pointer to udma unit registers
 * @param level the interrupt controller level (primary / secondary)
 * @param group the interrupt group ('AL_INT_GROUP_*')
 *
 * @returns the raw cause register value
 */
static INLINE uint32_t al_udma_iofic_read_cause(
	struct unit_regs __iomem *regs,
	enum al_udma_iofic_level level,
	int group)
{
	void __iomem *iofic_base;

	al_assert(al_udma_iofic_level_and_group_valid(level, group));

	iofic_base = al_udma_iofic_reg_base_get(regs, level);
	return al_iofic_read_cause(iofic_base, group);
}
#endif
/** @} end of UDMA group */

View file

@ -0,0 +1,66 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#ifndef __AL_HAL_UDMA_IOFIC_REG_H
#define __AL_HAL_UDMA_IOFIC_REG_H
#include "al_hal_iofic_regs.h"
#ifdef __cplusplus
extern "C" {
#endif
/** This structure covers all interrupt registers of a given UDMA, which is
 * built of an al_iofic_regs, which is the common I/O Fabric Interrupt
 * controller (IOFIC), and additional two interrupts groups dedicated for the
 * application-specific engine attached to the UDMA, the interrupt summary
 * of those two groups routed to group D of the main controller.
 */
struct udma_iofic_regs {
	/* Primary (common IOFIC) interrupt controller */
	struct al_iofic_regs main_iofic;
	/* Padding — presumably places secondary_iofic_ctrl at its HW offset;
	 * TODO confirm against the register spec */
	uint32_t rsrvd1[(0x1c00) >> 2];
	/* Secondary controller group controls (groups A and B) */
	struct al_iofic_grp_ctrl secondary_iofic_ctrl[2];
};
#ifdef __cplusplus
}
#endif
#endif /* __AL_HAL_UDMA_IOFIC_REG_H */

View file

@ -0,0 +1,618 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @{
* @file al_hal_udma_main.c
*
* @brief Universal DMA HAL driver for main functions (initialization, data path)
*
*/
#include <al_hal_udma.h>
#include <al_hal_udma_config.h>
#define AL_UDMA_Q_RST_TOUT 10000 /* Queue reset timeout [uSecs] */
#define UDMA_STATE_IDLE 0x0
#define UDMA_STATE_NORMAL 0x1
#define UDMA_STATE_ABORT 0x2
#define UDMA_STATE_RESERVED 0x3
/* Human-readable UDMA state names, indexed by 'enum al_udma_state'
 * (used by al_udma_state_set() debug logging) */
const char *const al_udma_states_name[] = {
	"Disable",
	"Idle",
	"Normal",
	"Abort",
	"Reset"
};
#define AL_UDMA_INITIAL_RING_ID 1
/* dma_q flags */
#define AL_UDMA_Q_FLAGS_IGNORE_RING_ID AL_BIT(0)
#define AL_UDMA_Q_FLAGS_NO_COMP_UPDATE AL_BIT(1)
#define AL_UDMA_Q_FLAGS_EN_COMP_COAL AL_BIT(2)
/*
 * Program static, revision-dependent default values into the UDMA
 * configuration registers. Called once from al_udma_init(); Tx (M2S) and
 * Rx (S2M) engines get different settings.
 */
static void al_udma_set_defaults(struct al_udma *udma)
{
	uint32_t tmp;
	uint8_t rev_id = udma->rev_id;

	if (udma->type == UDMA_TX) {
		struct unit_regs* tmp_unit_regs =
			(struct unit_regs*)udma->udma_regs;

		/* Setting the data fifo depth to 4K (256 strips of 16B)
		 * This allows the UDMA to have 16 outstanding writes */
		if (rev_id >= AL_UDMA_REV_ID_2) {
			al_reg_write32_masked(&tmp_unit_regs->m2s.m2s_rd.data_cfg,
				UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK,
				256 << UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT);
		}

		if (rev_id == AL_UDMA_REV_ID_0)
			/* disable AXI timeout for M0 (rev 0) */
			al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 0);
		else
			/* set AXI timeout to 1M cycles (~2.6 ms) */
			al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 1000000);

		/* disable the application ACK timeout */
		al_reg_write32(&tmp_unit_regs->m2s.m2s_comp.cfg_application_ack
			, 0); /* Ack time out */

		/* rev 0 only: limit descriptor-write bursts to 4 AXI beats */
		if (rev_id == AL_UDMA_REV_ID_0) {
			tmp = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
			tmp &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
			tmp |= 4 << UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT;
			al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1
				, tmp);
		}
	}
	if (udma->type == UDMA_RX) {
		/* disable the application ACK timeout for the Rx engine too */
		al_reg_write32(
			&udma->udma_regs->s2m.s2m_comp.cfg_application_ack, 0);
		/* Ack time out */
	}
}
/**
 * misc queue configurations
 *
 * For Tx queues, clears the internal-pause DMB bit in the rate limiter mask
 * register (i.e. enables DMB). Rx queues need no extra configuration here.
 *
 * @param udma_q udma queue data structure
 *
 * @return 0
 */
static int al_udma_q_config(struct al_udma_q *udma_q)
{
	uint32_t *mask_reg;
	uint32_t mask_val;

	if (udma_q->udma->type != UDMA_TX)
		return 0;

	/* enable DMB: clear the internal pause bit in the rate limit mask */
	mask_reg = &udma_q->q_regs->m2s_q.rlimit.mask;
	mask_val = al_reg_read32(mask_reg);
	mask_val &= ~UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB;
	al_reg_write32(mask_reg, mask_val);

	return 0;
}
/**
 * set the queue's completion configuration register
 *
 * Applies the queue flags (AL_UDMA_Q_FLAGS_NO_COMP_UPDATE,
 * AL_UDMA_Q_FLAGS_EN_COMP_COAL) to the HW completion configuration, and for
 * Rx queues also programs the completion descriptor size.
 *
 * @param udma_q udma queue data structure
 *
 * @return 0
 */
static int al_udma_q_config_compl(struct al_udma_q *udma_q)
{
	uint32_t *reg_addr;
	uint32_t val;

	/* NOTE(review): the UDMA_M2S_Q_COMP_CFG_* bit definitions are used
	 * for the S2M register too — presumably both share one layout */
	if (udma_q->udma->type == UDMA_TX)
		reg_addr = &udma_q->q_regs->m2s_q.comp_cfg;
	else
		reg_addr = &udma_q->q_regs->s2m_q.comp_cfg;

	val = al_reg_read32(reg_addr);

	/* enable/disable completion ring updates per the queue flag */
	if (udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE)
		val &= ~UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
	else
		val |= UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;

	/* the HW bit is a *disable*, so the flag logic is inverted */
	if (udma_q->flags & AL_UDMA_Q_FLAGS_EN_COMP_COAL)
		val &= ~UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
	else
		val |= UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;

	al_reg_write32(reg_addr, val);

	/* set the completion queue size */
	if (udma_q->udma->type == UDMA_RX) {
		val = al_reg_read32(
			&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c);
		val &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;

		/* the register expects it to be in words */
		val |= (udma_q->cdesc_size >> 2)
			& UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;

		al_reg_write32(&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c
			, val);
	}
	return 0;
}
/**
 * reset the queues pointers (Head, Tail, etc) and set the base addresses
 *
 * Programs the submission descriptor ring base/length registers, and the
 * completion ring base registers when a completion ring was supplied;
 * otherwise marks the queue as running without completion ring updates.
 *
 * @param udma_q udma queue data structure
 *
 * @return 0
 */
static int al_udma_q_set_pointers(struct al_udma_q *udma_q)
{
	/* reset the descriptors ring pointers */
	/* assert descriptor base address aligned. */
	al_assert((AL_ADDR_LOW(udma_q->desc_phy_base) &
		   ~UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK) == 0);
	al_reg_write32(&udma_q->q_regs->rings.drbp_low,
		       AL_ADDR_LOW(udma_q->desc_phy_base));
	al_reg_write32(&udma_q->q_regs->rings.drbp_high,
		       AL_ADDR_HIGH(udma_q->desc_phy_base));
	al_reg_write32(&udma_q->q_regs->rings.drl, udma_q->size);

	/* if completion ring update disabled */
	if (udma_q->cdesc_base_ptr == NULL) {
		udma_q->flags |= AL_UDMA_Q_FLAGS_NO_COMP_UPDATE;
	} else {
		/* reset the completion descriptors ring pointers */
		/* assert completion base address aligned. */
		al_assert((AL_ADDR_LOW(udma_q->cdesc_phy_base) &
			   ~UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK) == 0);
		al_reg_write32(&udma_q->q_regs->rings.crbp_low,
			       AL_ADDR_LOW(udma_q->cdesc_phy_base));
		al_reg_write32(&udma_q->q_regs->rings.crbp_high,
			       AL_ADDR_HIGH(udma_q->cdesc_phy_base));
	}
	/* push the queue's completion flags (set above) into hardware */
	al_udma_q_config_compl(udma_q);
	return 0;
}
/**
 * enable/disable udma queue
 *
 * Sets or clears the prefetch and scheduling enable bits in the queue
 * configuration register and updates the software queue status accordingly.
 *
 * @param udma_q udma queue data structure
 * @param enable none zero value enables the queue, zero means disable
 *
 * @return 0
 */
static int al_udma_q_enable(struct al_udma_q *udma_q, int enable)
{
	const uint32_t en_bits =
		UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING;
	uint32_t cfg = al_reg_read32(&udma_q->q_regs->rings.cfg);

	if (enable) {
		cfg |= en_bits;
		udma_q->status = AL_QUEUE_ENABLED;
	} else {
		cfg &= ~en_bits;
		udma_q->status = AL_QUEUE_DISABLED;
	}
	al_reg_write32(&udma_q->q_regs->rings.cfg, cfg);
	return 0;
}
/************************ API functions ***************************************/
/* Initializations functions */
/*
 * Initialize the udma engine
 *
 * @param udma udma context to initialize
 * @param udma_params initialization parameters (engine type, number of
 *	queues, register base, optional name)
 *
 * @return 0 on success, -EINVAL if num_of_queues is out of range.
 */
int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params)
{
	int i;

	al_assert(udma);
	/* FIX: udma_params is dereferenced below — assert it as well */
	al_assert(udma_params);

	if (udma_params->num_of_queues > DMA_MAX_Q) {
		al_err("udma: invalid num_of_queues parameter\n");
		return -EINVAL;
	}

	udma->type = udma_params->type;
	udma->num_of_queues = udma_params->num_of_queues;
	udma->gen_regs = &udma_params->udma_regs_base->gen;

	/* point udma_regs at the half of the register file matching the
	 * engine direction (M2S for Tx, S2M for Rx) */
	if (udma->type == UDMA_TX)
		udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->m2s;
	else
		udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->s2m;

	udma->rev_id = al_udma_get_revision(udma_params->udma_regs_base);

	if (udma_params->name == NULL)
		udma->name = "";
	else
		udma->name = udma_params->name;

	udma->state = UDMA_DISABLE;
	for (i = 0; i < DMA_MAX_Q; i++) {
		udma->udma_q[i].status = AL_QUEUE_NOT_INITIALIZED;
	}

	/* initialize configuration registers to correct values */
	al_udma_set_defaults(udma);

	al_dbg("udma [%s] initialized. base %p\n", udma->name,
		udma->udma_regs);

	return 0;
}
/*
 * Initialize the udma queue data structure
 *
 * Validates the queue parameters, fills in the software ring state, then
 * programs the queue's HW registers (ring pointers, completion config) and
 * enables the queue.
 *
 * @return 0 on success, -EINVAL for bad parameters, -EIO if the queue is
 *	already enabled.
 */
int al_udma_q_init(struct al_udma *udma, uint32_t qid,
		   struct al_udma_q_params *q_params)
{
	struct al_udma_q *udma_q;

	al_assert(udma);
	al_assert(q_params);

	if (qid >= udma->num_of_queues) {
		al_err("udma: invalid queue id (%d)\n", qid);
		return -EINVAL;
	}
	if (udma->udma_q[qid].status == AL_QUEUE_ENABLED) {
		al_err("udma: queue (%d) already enabled!\n", qid);
		return -EIO;
	}
	if (q_params->size < AL_UDMA_MIN_Q_SIZE) {
		al_err("udma: queue (%d) size too small\n", qid);
		return -EINVAL;
	}
	if (q_params->size > AL_UDMA_MAX_Q_SIZE) {
		al_err("udma: queue (%d) size too large\n", qid);
		return -EINVAL;
	}
	if (q_params->size & (q_params->size - 1)) {
		/* FIX: the qid/size arguments were swapped relative to the
		 * format string "queue (%d) size (%d)" */
		al_err("udma: queue (%d) size (%d) must be power of 2\n",
			qid, q_params->size);
		return -EINVAL;
	}

	udma_q = &udma->udma_q[qid];

	/* set the queue's regs base address */
	if (udma->type == UDMA_TX)
		udma_q->q_regs = (union udma_q_regs __iomem *)
			&udma->udma_regs->m2s.m2s_q[qid];
	else
		udma_q->q_regs = (union udma_q_regs __iomem *)
			&udma->udma_regs->s2m.s2m_q[qid];

	udma_q->adapter_rev_id = q_params->adapter_rev_id;
	udma_q->size = q_params->size;
	/* size is a power of 2 (checked above), so size - 1 is a valid mask */
	udma_q->size_mask = q_params->size - 1;
	udma_q->desc_base_ptr = q_params->desc_base;
	udma_q->desc_phy_base = q_params->desc_phy_base;
	udma_q->cdesc_base_ptr = q_params->cdesc_base;
	udma_q->cdesc_phy_base = q_params->cdesc_phy_base;
	udma_q->cdesc_size = q_params->cdesc_size;

	udma_q->next_desc_idx = 0;
	udma_q->next_cdesc_idx = 0;
	udma_q->end_cdesc_ptr = (uint8_t *) udma_q->cdesc_base_ptr +
		(udma_q->size - 1) * udma_q->cdesc_size;
	udma_q->comp_head_idx = 0;
	udma_q->comp_head_ptr = (union al_udma_cdesc *)udma_q->cdesc_base_ptr;
	udma_q->desc_ring_id = AL_UDMA_INITIAL_RING_ID;
	udma_q->comp_ring_id = AL_UDMA_INITIAL_RING_ID;
#if 0
	udma_q->desc_ctrl_bits = AL_UDMA_INITIAL_RING_ID <<
		AL_M2S_DESC_RING_ID_SHIFT;
#endif
	udma_q->pkt_crnt_descs = 0;
	udma_q->flags = 0;
	udma_q->status = AL_QUEUE_DISABLED;
	udma_q->udma = udma;
	udma_q->qid = qid;

	/* start hardware configuration: */
	al_udma_q_config(udma_q);
	/* reset the queue pointers */
	al_udma_q_set_pointers(udma_q);
	/* enable the q */
	al_udma_q_enable(udma_q, 1);

	al_dbg("udma [%s %d]: %s q init. size 0x%x\n"
		" desc ring info: phys base 0x%llx virt base %p\n"
		" cdesc ring info: phys base 0x%llx virt base %p "
		"entry size 0x%x",
		udma_q->udma->name, udma_q->qid,
		udma->type == UDMA_TX ? "Tx" : "Rx",
		q_params->size,
		(unsigned long long)q_params->desc_phy_base,
		q_params->desc_base,
		(unsigned long long)q_params->cdesc_phy_base,
		q_params->cdesc_base,
		q_params->cdesc_size);

	return 0;
}
/*
 * Reset a udma queue
 *
 * Stops scheduling and prefetch, waits for in-flight work to drain (the
 * completion head pointer catches up with the descriptor consumer pointer),
 * then asserts the per-queue software reset.
 *
 * @return 0 on success, -ETIMEDOUT if the queue failed to quiesce.
 */
int al_udma_q_reset(struct al_udma_q *udma_q)
{
	unsigned int remaining_time = AL_UDMA_Q_RST_TOUT;
	uint32_t *status_reg;
	uint32_t *dcp_reg;
	uint32_t *crhp_reg;
	uint32_t *q_sw_ctrl_reg;

	al_assert(udma_q);

	/* De-assert scheduling and prefetch */
	al_udma_q_enable(udma_q, 0);

	/* Wait for scheduling and prefetch to stop */
	status_reg = &udma_q->q_regs->rings.status;

	while (remaining_time) {
		uint32_t status = al_reg_read32(status_reg);

		if (!(status & (UDMA_M2S_Q_STATUS_PREFETCH |
						UDMA_M2S_Q_STATUS_SCHEDULER)))
			break;

		remaining_time--;
		al_udelay(1);
	}

	if (!remaining_time) {
		al_err("udma [%s %d]: %s timeout waiting for prefetch and "
			"scheduler disable\n", udma_q->udma->name, udma_q->qid,
			__func__);
		return -ETIMEDOUT;
	}

	/* Wait for the completion queue to reach to the same pointer as the
	 * prefetch stopped at ([TR]DCP == [TR]CRHP) */
	/* NOTE(review): remaining_time is not reloaded here, so
	 * AL_UDMA_Q_RST_TOUT bounds the total wait over both loops —
	 * confirm this is the intended budget */
	dcp_reg = &udma_q->q_regs->rings.dcp;
	crhp_reg = &udma_q->q_regs->rings.crhp;

	while (remaining_time) {
		uint32_t dcp = al_reg_read32(dcp_reg);
		uint32_t crhp = al_reg_read32(crhp_reg);

		if (dcp == crhp)
			break;

		remaining_time--;
		al_udelay(1);
	};

	if (!remaining_time) {
		al_err("udma [%s %d]: %s timeout waiting for dcp==crhp\n",
			udma_q->udma->name, udma_q->qid, __func__);
		return -ETIMEDOUT;
	}

	/* Assert the queue reset */
	if (udma_q->udma->type == UDMA_TX)
		q_sw_ctrl_reg = &udma_q->q_regs->m2s_q.q_sw_ctrl;
	else
		q_sw_ctrl_reg = &udma_q->q_regs->s2m_q.q_sw_ctrl;

	al_reg_write32(q_sw_ctrl_reg, UDMA_M2S_Q_SW_CTRL_RST_Q);

	return 0;
}
/*
 * Return (by reference) a pointer to a specific queue data structure
 *
 * @return 0 on success, -EINVAL for an out-of-range queue id.
 */
int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
			struct al_udma_q **q_handle)
{
	struct al_udma_q *q;

	al_assert(udma);
	al_assert(q_handle);

	if (unlikely(qid >= udma->num_of_queues)) {
		al_err("udma [%s]: invalid queue id (%d)\n", udma->name, qid);
		return -EINVAL;
	}

	q = &udma->udma_q[qid];
	*q_handle = q;
	return 0;
}
/*
 * Change the UDMA's state
 *
 * @param udma udma context
 * @param state target state (UDMA_DISABLE / UDMA_NORMAL / UDMA_ABORT)
 *
 * @return 0 on success, -EINVAL for an unsupported target state.
 */
int al_udma_state_set(struct al_udma *udma, enum al_udma_state state)
{
	uint32_t reg;

	al_assert(udma != NULL);

	/* same-state requests are only logged; the write is issued anyway */
	if (state == udma->state)
		al_dbg("udma [%s]: requested state identical to "
			"current state (%d)\n", udma->name, state);

	al_dbg("udma [%s]: change state from (%s) to (%s)\n",
		 udma->name, al_udma_states_name[udma->state],
		 al_udma_states_name[state]);

	reg = 0;
	switch (state) {
	case UDMA_DISABLE:
		reg |= UDMA_M2S_CHANGE_STATE_DIS;
		break;
	case UDMA_NORMAL:
		reg |= UDMA_M2S_CHANGE_STATE_NORMAL;
		break;
	case UDMA_ABORT:
		reg |= UDMA_M2S_CHANGE_STATE_ABORT;
		break;
	default:
		al_err("udma: invalid state (%d)\n", state);
		return -EINVAL;
	}

	/* NOTE(review): the M2S_* change-state bit values are written to the
	 * S2M register as well — presumably both share one layout */
	if (udma->type == UDMA_TX)
		al_reg_write32(&udma->udma_regs->m2s.m2s.change_state, reg);
	else
		al_reg_write32(&udma->udma_regs->s2m.s2m.change_state, reg);

	udma->state = state;
	return 0;
}
/*
 * return the current UDMA hardware state
 *
 * Reads the engine state register and decodes the four sub-unit state
 * fields (completion control, stream interface, data read control,
 * descriptor prefetch). The most "active" state wins: abort > normal > idle.
 */
enum al_udma_state al_udma_state_get(struct al_udma *udma)
{
	uint32_t state_reg;
	uint32_t comp_ctrl;
	uint32_t stream_if;
	uint32_t data_rd;
	uint32_t desc_pref;

	if (udma->type == UDMA_TX)
		state_reg = al_reg_read32(&udma->udma_regs->m2s.m2s.state);
	else
		state_reg = al_reg_read32(&udma->udma_regs->s2m.s2m.state);

	comp_ctrl = AL_REG_FIELD_GET(state_reg,
				UDMA_M2S_STATE_COMP_CTRL_MASK,
				UDMA_M2S_STATE_COMP_CTRL_SHIFT);
	stream_if = AL_REG_FIELD_GET(state_reg,
				UDMA_M2S_STATE_STREAM_IF_MASK,
				UDMA_M2S_STATE_STREAM_IF_SHIFT);
	data_rd = AL_REG_FIELD_GET(state_reg,
				UDMA_M2S_STATE_DATA_RD_CTRL_MASK,
				UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT);
	desc_pref = AL_REG_FIELD_GET(state_reg,
				UDMA_M2S_STATE_DESC_PREF_MASK,
				UDMA_M2S_STATE_DESC_PREF_SHIFT);

	/* UDMA_STATE_RESERVED (0x3) should never be reported by hardware */
	al_assert(comp_ctrl != UDMA_STATE_RESERVED);
	al_assert(stream_if != UDMA_STATE_RESERVED);
	al_assert(data_rd != UDMA_STATE_RESERVED);
	al_assert(desc_pref != UDMA_STATE_RESERVED);

	/* if any of the states is abort then return abort */
	if ((comp_ctrl == UDMA_STATE_ABORT) || (stream_if == UDMA_STATE_ABORT)
			|| (data_rd == UDMA_STATE_ABORT)
			|| (desc_pref == UDMA_STATE_ABORT))
		return UDMA_ABORT;

	/* if any of the states is normal then return normal */
	if ((comp_ctrl == UDMA_STATE_NORMAL)
			|| (stream_if == UDMA_STATE_NORMAL)
			|| (data_rd == UDMA_STATE_NORMAL)
			|| (desc_pref == UDMA_STATE_NORMAL))
		return UDMA_NORMAL;

	return UDMA_IDLE;
}
/*
* Action handling
*/
/*
 * Get the next completed packet from the completion ring of the queue.
 *
 * Walks the completion ring from comp_head_ptr, consuming completion
 * descriptors until either a descriptor marked "last" is found (end of
 * packet) or a descriptor the hardware has not written yet is reached.
 * Partially completed packets are remembered (pkt_crnt_descs /
 * comp_head_ptr) so a later call resumes where this one stopped.
 *
 * @param udma_q queue handle; must not have AL_UDMA_Q_FLAGS_NO_COMP_UPDATE
 * @param cdesc  out: set to the FIRST completion descriptor of the packet
 *               (only written when a full packet is available)
 *
 * @return number of completion descriptors of the completed packet, or 0
 *         when no complete packet is available yet
 */
uint32_t al_udma_cdesc_packet_get(
	struct al_udma_q		*udma_q,
	volatile union al_udma_cdesc	**cdesc)
{
	uint32_t count;
	volatile union al_udma_cdesc *curr;
	uint32_t comp_flags;

	/* this function requires the completion ring update */
	al_assert(!(udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE));

	/* comp_head points to the last comp desc that was processed */
	curr = udma_q->comp_head_ptr;
	/* descriptors are little-endian in memory; swap on big-endian hosts */
	comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);

	/* check if the completion descriptor is new */
	if (unlikely(al_udma_new_cdesc(udma_q, comp_flags) == AL_FALSE))
		return 0;
	/* if new desc found, increment the current packet's descriptors,
	 * resuming from any partially consumed packet */
	count = udma_q->pkt_crnt_descs + 1;
	while (!cdesc_is_last(comp_flags)) {
		curr = al_cdesc_next_update(udma_q, curr);
		comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
		if (unlikely(al_udma_new_cdesc(udma_q, comp_flags)
					== AL_FALSE)) {
			/* the current packet here doesn't have all */
			/* descriptors completed. log the current desc */
			/* location and number of completed descriptors so */
			/* far. then return */
			udma_q->pkt_crnt_descs = count;
			udma_q->comp_head_ptr = curr;
			return 0;
		}
		count++;
		/* check against max descs per packet. */
		al_assert(count <= udma_q->size);
	}
	/* return back the first descriptor of the packet */
	*cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
	/* reset partial-packet state; head now points past this packet */
	udma_q->pkt_crnt_descs = 0;
	udma_q->comp_head_ptr = al_cdesc_next_update(udma_q, curr);

	al_dbg("udma [%s %d]: packet completed. first desc %p (ixd 0x%x)"
		" descs %d\n", udma_q->udma->name, udma_q->qid, *cdesc,
		udma_q->next_cdesc_idx, count);
	return count;
}
/** @} end of UDMA group */

View file

@ -0,0 +1,104 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @{
* @file al_hal_udma_regs.h
*
* @brief udma registers definition
*
*
*/
#ifndef __AL_HAL_UDMA_REG_H
#define __AL_HAL_UDMA_REG_H
#include "al_hal_udma_regs_m2s.h"
#include "al_hal_udma_regs_s2m.h"
#include "al_hal_udma_regs_gen.h"
#define AL_UDMA_REV_ID_REV0 0
#define AL_UDMA_REV_ID_REV1 1
#define AL_UDMA_REV_ID_REV2 2
#ifdef __cplusplus
extern "C" {
#endif
/** UDMA registers, either m2s or s2m — a UDMA instance is one direction,
 *  so both register files are overlaid in a single union */
union udma_regs {
	struct udma_m2s_regs m2s;
	struct udma_s2m_regs s2m;
};
/** Full UDMA unit register file: M2S padded to 0x10000, S2M padded to
 *  0x1C000, followed by the general (shared) registers */
struct unit_regs {
	struct udma_m2s_regs m2s;
	uint32_t rsrvd0[(0x10000 - sizeof(struct udma_m2s_regs)) >> 2];
	struct udma_s2m_regs s2m;
	uint32_t rsrvd1[((0x1C000 - 0x10000) - sizeof(struct udma_s2m_regs)) >> 2];
	struct udma_gen_regs gen;
};
/** UDMA submission and completion registers, M2S and S2M UDMAs have the
 *  same structure */
struct udma_rings_regs {
	uint32_t rsrvd0[8];
	uint32_t cfg;		/* Descriptor ring configuration */
	uint32_t status;	/* Descriptor ring status and information */
	uint32_t drbp_low;	/* Descriptor Ring Base Pointer [31:4] */
	uint32_t drbp_high;	/* Descriptor Ring Base Pointer [63:32] */
	uint32_t drl;		/* Descriptor Ring Length[23:2] */
	uint32_t drhp;		/* Descriptor Ring Head Pointer */
	uint32_t drtp_inc;	/* Descriptor Tail Pointer increment */
	uint32_t drtp;		/* Descriptor Tail Pointer */
	uint32_t dcp;		/* Descriptor Current Pointer */
	uint32_t crbp_low;	/* Completion Ring Base Pointer [31:4] */
	uint32_t crbp_high;	/* Completion Ring Base Pointer [63:32] */
	uint32_t crhp;		/* Completion Ring Head Pointer */
	uint32_t crhp_internal;	/* Completion Ring Head Pointer internal
				 * (before the coalescing FIFO) */
};
/** M2S and S2M generic structure of Q registers — the direction-neutral
 *  "rings" view overlays the direction-specific m2s_q/s2m_q layouts */
union udma_q_regs {
	struct udma_rings_regs rings;
	struct udma_m2s_q m2s_q;
	struct udma_s2m_q s2m_q;
};
#ifdef __cplusplus
}
#endif
#endif /* __AL_HAL_UDMA_REG_H */
/** @} end of UDMA group */

View file

@ -0,0 +1,414 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @file al_hal_udma_regs_gen.h
*
* @brief C Header file for the UDMA general registers
*
*/
#ifndef __AL_HAL_UDMA_GEN_REG_H
#define __AL_HAL_UDMA_GEN_REG_H
#include "al_hal_udma_iofic_regs.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Unit Registers
*/
/** Miscellaneous general DMA registers (interrupt config, revision, timers) */
struct udma_gen_dma_misc {
	/* [0x0] Reserved register for the interrupt controller */
	uint32_t int_cfg;
	/* [0x4] Revision register */
	uint32_t revision;
	/* [0x8] Reserved for future use */
	uint32_t general_cfg_1;
	/* [0xc] Reserved for future use */
	uint32_t general_cfg_2;
	/* [0x10] Reserved for future use */
	uint32_t general_cfg_3;
	/* [0x14] Reserved for future use */
	uint32_t general_cfg_4;
	/* [0x18] General timer configuration */
	uint32_t general_cfg_5;
	uint32_t rsrvd[57];
};
/** Inter-DMA mailbox registers (one instance per neighbor DMA) */
struct udma_gen_mailbox {
	/*
	 * [0x0] Mailbox interrupt generator.
	 * Generates interrupt to neighbor DMA
	 */
	uint32_t interrupt;
	/* [0x4] Mailbox message data out */
	uint32_t msg_out;
	/* [0x8] Mailbox message data in */
	uint32_t msg_in;
	uint32_t rsrvd[13];
};
/** AXI master configuration registers */
struct udma_gen_axi {
	/* [0x0] Configuration of the AXI masters */
	uint32_t cfg_1;
	/* [0x4] Configuration of the AXI masters */
	uint32_t cfg_2;
	/* [0x8] Configuration of the AXI masters. Endianness configuration */
	uint32_t endian_cfg;
	uint32_t rsrvd[61];
};
/** SRAM control register (one instance per internal SRAM) */
struct udma_gen_sram_ctrl {
	/* [0x0] Timing configuration */
	uint32_t timing;
};
/** Per-queue VMID configuration registers */
struct udma_gen_vmid {
	/* [0x0] VMID control */
	uint32_t cfg_vmid_0;
	/* [0x4] TX queue 0/1 VMID */
	uint32_t cfg_vmid_1;
	/* [0x8] TX queue 2/3 VMID */
	uint32_t cfg_vmid_2;
	/* [0xc] RX queue 0/1 VMID */
	uint32_t cfg_vmid_3;
	/* [0x10] RX queue 2/3 VMID */
	uint32_t cfg_vmid_4;
};
/** Per-queue VMADDR configuration registers */
struct udma_gen_vmaddr {
	/* [0x0] TX queue 0/1 VMADDR */
	uint32_t cfg_vmaddr_0;
	/* [0x4] TX queue 2/3 VMADDR */
	uint32_t cfg_vmaddr_1;
	/* [0x8] RX queue 0/1 VMADDR */
	uint32_t cfg_vmaddr_2;
	/* [0xc] RX queue 2/3 VMADDR */
	uint32_t cfg_vmaddr_3;
};
/** Per-queue VMPR (VM protection) configuration registers */
struct udma_gen_vmpr {
	/* [0x0] TX VMPR control */
	uint32_t cfg_vmpr_0;
	/* [0x4] TX VMPR Address High Register */
	uint32_t cfg_vmpr_1;
	/* [0x8] TX queue VMID values */
	uint32_t cfg_vmpr_2;
	/* [0xc] TX queue VMID values */
	uint32_t cfg_vmpr_3;
	/* [0x10] RX VMPR control */
	uint32_t cfg_vmpr_4;
	/* [0x14] RX VMPR Buffer2 MSB address */
	uint32_t cfg_vmpr_5;
	/* [0x18] RX queue VMID values */
	uint32_t cfg_vmpr_6;
	/* [0x1c] RX queue BUF1 VMID values */
	uint32_t cfg_vmpr_7;
	/* [0x20] RX queue BUF2 VMID values */
	uint32_t cfg_vmpr_8;
	/* [0x24] RX queue Direct Data Placement VMID values */
	uint32_t cfg_vmpr_9;
	/* [0x28] RX VMPR BUF1 Address High Register */
	uint32_t cfg_vmpr_10;
	/* [0x2c] RX VMPR BUF2 Address High Register */
	uint32_t cfg_vmpr_11;
	/* [0x30] RX VMPR DDP Address High Register */
	uint32_t cfg_vmpr_12;
	uint32_t rsrvd[3];
};
/** General (direction-independent) UDMA register file; bracketed offsets
 *  are relative to the unit's general-register base */
struct udma_gen_regs {
	struct udma_iofic_regs interrupt_regs;		/* [0x0000] */
	struct udma_gen_dma_misc dma_misc;		/* [0x2080] */
	struct udma_gen_mailbox mailbox[4];		/* [0x2180] */
	struct udma_gen_axi axi;			/* [0x2280] */
	struct udma_gen_sram_ctrl sram_ctrl[25];	/* [0x2380] */
	uint32_t rsrvd_1[2];
	struct udma_gen_vmid vmid;			/* [0x23ec] */
	struct udma_gen_vmaddr vmaddr;			/* [0x2400] */
	uint32_t rsrvd_2[252];
	struct udma_gen_vmpr vmpr[4];			/* [0x2800] */
};
/*
* Registers Fields
*/
/**** int_cfg register ****/
/*
 * MSIX data width
 * 1 - 64 bit
 * 0 - 32 bit
 */
#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_64 (1 << 0)
/* General configuration */
#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_3_1_MASK 0x0000000E
#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_3_1_SHIFT 1
/* MSIx AXI QoS */
#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_AXI_QOS_MASK 0x00000070
#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_AXI_QOS_SHIFT 4
#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_31_7_MASK 0xFFFFFF80
#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_31_7_SHIFT 7
/**** revision register ****/
/* Design programming interface revision ID */
#define UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_MASK 0x00000FFF
#define UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_SHIFT 0
/* Design minor revision ID */
#define UDMA_GEN_DMA_MISC_REVISION_MINOR_ID_MASK 0x00FFF000
#define UDMA_GEN_DMA_MISC_REVISION_MINOR_ID_SHIFT 12
/* Design major revision ID */
#define UDMA_GEN_DMA_MISC_REVISION_MAJOR_ID_MASK 0xFF000000
#define UDMA_GEN_DMA_MISC_REVISION_MAJOR_ID_SHIFT 24
/**** Interrupt register ****/
/* Generate interrupt to another DMA */
#define UDMA_GEN_MAILBOX_INTERRUPT_SET (1 << 0)
/**** cfg_2 register ****/
/*
* Enable arbitration promotion.
* Increment master priority after configured number of arbitration cycles
*/
#define UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_MASK 0x0000000F
#define UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_SHIFT 0
/**** endian_cfg register ****/
/* Swap M2S descriptor read and completion descriptor write. */
#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC (1 << 0)
/* Swap M2S data read. */
#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA (1 << 1)
/* Swap S2M descriptor read and completion descriptor write. */
#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC (1 << 2)
/* Swap S2M data write. */
#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA (1 << 3)
/*
* Swap 32 or 64 bit mode:
* 0 - Swap groups of 4 bytes
* 1 - Swap groups of 8 bytes
*/
#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN (1 << 4)
/**** timing register ****/
/* Write margin */
#define UDMA_GEN_SRAM_CTRL_TIMING_RMA_MASK 0x0000000F
#define UDMA_GEN_SRAM_CTRL_TIMING_RMA_SHIFT 0
/* Write margin enable */
#define UDMA_GEN_SRAM_CTRL_TIMING_RMEA (1 << 8)
/* Read margin */
#define UDMA_GEN_SRAM_CTRL_TIMING_RMB_MASK 0x000F0000
#define UDMA_GEN_SRAM_CTRL_TIMING_RMB_SHIFT 16
/* Read margin enable */
#define UDMA_GEN_SRAM_CTRL_TIMING_RMEB (1 << 24)
/**** cfg_vmid_0 register ****/
/* For M2S queues 3:0, enable usage of the VMID from the buffer address 63:56 */
#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_MASK 0x0000000F
#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_SHIFT 0
/*
* For M2S queues 3:0, enable usage of the VMID from the configuration register
* (cfg_vmid_1/2 used for M2S queue_x)
*/
#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_MASK 0x000000F0
#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_SHIFT 4
/* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL (1 << 8)
/* Enable write to all VMID_n registers in the MSI-X Controller */
#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN (1 << 9)
/* For S2M queues 3:0, enable usage of the VMID from the buffer address 63:56 */
#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_MASK 0x000F0000
#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_SHIFT 16
/*
* For S2M queues 3:0, enable usage of the VMID from the configuration register
* (cfg_vmid_3/4 used for M2S queue_x)
*/
#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_MASK 0x00F00000
#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_SHIFT 20
/**** cfg_vmid_1 register ****/
/* TX queue 0 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_SHIFT 0
/* TX queue 1 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_SHIFT 16
/**** cfg_vmid_2 register ****/
/* TX queue 2 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_SHIFT 0
/* TX queue 3 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_SHIFT 16
/**** cfg_vmid_3 register ****/
/* RX queue 0 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_SHIFT 0
/* RX queue 1 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_SHIFT 16
/**** cfg_vmid_4 register ****/
/* RX queue 2 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_SHIFT 0
/* RX queue 3 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_SHIFT 16
/**** cfg_vmaddr_0 register ****/
/* TX queue 0 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_MASK 0x0000FFFF
#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_SHIFT 0
/* TX queue 1 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_MASK 0xFFFF0000
#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_SHIFT 16
/**** cfg_vmaddr_1 register ****/
/* TX queue 2 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_MASK 0x0000FFFF
#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_SHIFT 0
/* TX queue 3 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_MASK 0xFFFF0000
#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_SHIFT 16
/**** cfg_vmaddr_2 register ****/
/* RX queue 0 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_MASK 0x0000FFFF
#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_SHIFT 0
/* RX queue 1 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_MASK 0xFFFF0000
#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_SHIFT 16
/**** cfg_vmaddr_3 register ****/
/* RX queue 2 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_MASK 0x0000FFFF
#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_SHIFT 0
/* RX queue 3 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_MASK 0xFFFF0000
#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_SHIFT 16
/**** cfg_vmpr_0 register ****/
/* TX High Address Select Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_MASK 0x0000003F
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_SHIFT 0
/* TX Data VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN (1 << 7)
/* TX Prefetch VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN (1 << 28)
/* TX Completions VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN (1 << 29)
/**** cfg_vmpr_2 register ****/
/* TX queue Prefetch VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_SHIFT 0
/* TX queue Completion VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_SHIFT 16
/**** cfg_vmpr_3 register ****/
/* TX queue Data VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SHIFT 0
/* TX queue Data VMID select */
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_SHIFT 16
/**** cfg_vmpr_4 register ****/
/* RX Data Buffer1 - High Address Select Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_MASK 0x0000003F
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_SHIFT 0
/* RX Data Buffer1 VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN (1 << 7)
/* RX Data Buffer2 - High Address Select Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_MASK 0x00003F00
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_SHIFT 8
/* RX Data Buffer2 VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN (1 << 15)
/* RX Direct Data Placement - High Address Select Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_MASK 0x003F0000
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_SHIFT 16
/* RX Direct Data Placement VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN (1 << 23)
/* RX Buffer 2 MSB address word selects per bytes, per queue */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_MASK 0x0F000000
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_SHIFT 24
/* RX Prefetch VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN (1 << 28)
/* RX Completions VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN (1 << 29)
/**** cfg_vmpr_6 register ****/
/* RX queue Prefetch VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_SHIFT 0
/* RX queue Completion VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_SHIFT 16
/**** cfg_vmpr_7 register ****/
/* RX queue Data Buffer 1 VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SHIFT 0
/* RX queue Data Buffer 1 VMID select */
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_SHIFT 16
/**** cfg_vmpr_8 register ****/
/* RX queue Data Buffer 2 VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SHIFT 0
/* RX queue Data Buffer 2 VMID select */
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_SHIFT 16
/**** cfg_vmpr_9 register ****/
/* RX queue DDP VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SHIFT 0
/* RX queue DDP VMID select */
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_SHIFT 16
#ifdef __cplusplus
}
#endif
#endif /* __AL_HAL_UDMA_GEN_REG_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,998 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @file al_hal_udma_regs_s2m.h
*
* @brief C Header file for the UDMA S2M registers
*
*/
#ifndef __AL_HAL_UDMA_S2M_REG_H
#define __AL_HAL_UDMA_S2M_REG_H
#include "al_hal_plat_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Unit Registers
*/
/** S2M AXI master configuration registers */
struct udma_axi_s2m {
	/* [0x0] Data write master configuration */
	uint32_t data_wr_cfg_1;
	/* [0x4] Data write master configuration */
	uint32_t data_wr_cfg_2;
	/* [0x8] Descriptor read master configuration */
	uint32_t desc_rd_cfg_4;
	/* [0xc] Descriptor read master configuration */
	uint32_t desc_rd_cfg_5;
	/* [0x10] Completion write master configuration */
	uint32_t comp_wr_cfg_1;
	/* [0x14] Completion write master configuration */
	uint32_t comp_wr_cfg_2;
	/* [0x18] Data write master configuration */
	uint32_t data_wr_cfg;
	/* [0x1c] Descriptors read master configuration */
	uint32_t desc_rd_cfg_3;
	/* [0x20] Completion descriptors write master configuration */
	uint32_t desc_wr_cfg_1;
	/* [0x24] AXI outstanding read configuration */
	uint32_t ostand_cfg_rd;
	/* [0x28] AXI outstanding write configuration */
	uint32_t ostand_cfg_wr;
	uint32_t rsrvd[53];
};
/** S2M engine state, error logging and debug registers */
struct udma_s2m {
	/*
	 * [0x0] DMA state
	 * 00 - No pending tasks
	 * 01 - Normal (active)
	 * 10 - Abort (error condition)
	 * 11 - Reserved
	 */
	uint32_t state;
	/* [0x4] CPU request to change DMA state */
	uint32_t change_state;
	uint32_t rsrvd_0;
	/*
	 * [0xc] S2M DMA error log mask.
	 * Each error has an interrupt controller cause bit.
	 * This register determines if these errors cause the S2M DMA to log
	 * the error condition.
	 * 0 - Log is enabled
	 * 1 - Log is masked.
	 */
	uint32_t err_log_mask;
	uint32_t rsrvd_1;
	/*
	 * [0x14] DMA header log
	 * Sample the packet header that caused the error
	 */
	uint32_t log_0;
	/*
	 * [0x18] DMA header log
	 * Sample the packet header that caused the error.
	 */
	uint32_t log_1;
	/*
	 * [0x1c] DMA header log
	 * Sample the packet header that caused the error.
	 */
	uint32_t log_2;
	/*
	 * [0x20] DMA header log
	 * Sample the packet header that caused the error
	 */
	uint32_t log_3;
	/* [0x24] DMA clear error log */
	uint32_t clear_err_log;
	/* [0x28] S2M stream data FIFO status */
	uint32_t s_data_fifo_status;
	/* [0x2c] S2M stream header FIFO status */
	uint32_t s_header_fifo_status;
	/* [0x30] S2M AXI data FIFO status */
	uint32_t axi_data_fifo_status;
	/* [0x34] S2M unack FIFO status */
	uint32_t unack_fifo_status;
	/* [0x38] Select queue for debug */
	uint32_t indirect_ctrl;
	/*
	 * [0x3c] S2M prefetch FIFO status.
	 * Status of the selected queue in S2M_indirect_ctrl
	 */
	uint32_t sel_pref_fifo_status;
	/*
	 * [0x40] S2M completion FIFO status.
	 * Status of the selected queue in S2M_indirect_ctrl
	 */
	uint32_t sel_comp_fifo_status;
	/* [0x44] S2M state machine and FIFO clear control */
	uint32_t clear_ctrl;
	/* [0x48] S2M Misc Check enable */
	uint32_t check_en;
	/* [0x4c] S2M FIFO enable control, internal */
	uint32_t fifo_en;
	/* [0x50] Stream interface configuration */
	uint32_t stream_cfg;
	uint32_t rsrvd[43];
};
/** S2M descriptor prefetch configuration registers */
struct udma_s2m_rd {
	/* [0x0] S2M descriptor prefetch configuration */
	uint32_t desc_pref_cfg_1;
	/* [0x4] S2M descriptor prefetch configuration */
	uint32_t desc_pref_cfg_2;
	/* [0x8] S2M descriptor prefetch configuration */
	uint32_t desc_pref_cfg_3;
	/* [0xc] S2M descriptor prefetch configuration */
	uint32_t desc_pref_cfg_4;
	uint32_t rsrvd[12];
};
/** S2M data write configuration registers */
struct udma_s2m_wr {
	/* [0x0] Stream data FIFO configuration */
	uint32_t data_cfg_1;
	/* [0x4] Data write configuration */
	uint32_t data_cfg_2;
	uint32_t rsrvd[14];
};
/** S2M completion controller configuration registers */
struct udma_s2m_comp {
	/* [0x0] Completion controller configuration */
	uint32_t cfg_1c;
	/* [0x4] Completion controller configuration */
	uint32_t cfg_2c;
	uint32_t rsrvd_0;
	/* [0xc] Completion controller application acknowledge configuration */
	uint32_t cfg_application_ack;
	uint32_t rsrvd[12];
};
/** S2M statistics counters */
struct udma_s2m_stat {
	uint32_t rsrvd_0;
	/* [0x4] Number of dropped packets */
	uint32_t drop_pkt;
	/*
	 * [0x8] Counting the net length of the data buffers [64-bit]
	 * Should be read before rx_bytes_high
	 */
	uint32_t rx_bytes_low;
	/*
	 * [0xc] Counting the net length of the data buffers [64-bit]
	 * Value is sampled when rx_bytes_low is read, so it should be read
	 * after rx_bytes_low
	 */
	uint32_t rx_bytes_high;
	/* [0x10] Total number of descriptors read from the host memory */
	uint32_t prefed_desc;
	/* [0x14] Number of packets written into the completion ring */
	uint32_t comp_pkt;
	/* [0x18] Number of descriptors written into the completion ring */
	uint32_t comp_desc;
	/*
	 * [0x1c] Number of acknowledged packets.
	 * (acknowledge sent to the stream interface)
	 */
	uint32_t ack_pkts;
	uint32_t rsrvd[56];
};
/** S2M feature (instantiation parameter) registers — read-only capability
 *  description of this UDMA instance */
struct udma_s2m_feature {
	/*
	 * [0x0] S2M Feature register
	 * S2M instantiation parameters
	 */
	uint32_t reg_1;
	/* [0x4] Reserved S2M feature register */
	uint32_t reg_2;
	/*
	 * [0x8] S2M Feature register
	 * S2M instantiation parameters
	 */
	uint32_t reg_3;
	/*
	 * [0xc] S2M Feature register.
	 * S2M instantiation parameters.
	 */
	uint32_t reg_4;
	/*
	 * [0x10] S2M Feature register.
	 * S2M instantiation parameters.
	 */
	uint32_t reg_5;
	/* [0x14] S2M Feature register. S2M instantiation parameters. */
	uint32_t reg_6;
	uint32_t rsrvd[58];
};
/** Per-queue S2M (RX) ring registers */
struct udma_s2m_q {
	uint32_t rsrvd_0[8];
	/* [0x20] S2M Descriptor ring configuration */
	uint32_t cfg;
	/* [0x24] S2M Descriptor ring status and information */
	uint32_t status;
	/* [0x28] Rx Descriptor Ring Base Pointer [31:4] */
	uint32_t rdrbp_low;
	/* [0x2c] Rx Descriptor Ring Base Pointer [63:32] */
	uint32_t rdrbp_high;
	/*
	 * [0x30] Rx Descriptor Ring Length[23:2]
	 */
	uint32_t rdrl;
	/* [0x34] RX Descriptor Ring Head Pointer */
	uint32_t rdrhp;
	/* [0x38] Rx Descriptor Tail Pointer increment */
	uint32_t rdrtp_inc;
	/* [0x3c] Rx Descriptor Tail Pointer */
	uint32_t rdrtp;
	/* [0x40] RX Descriptor Current Pointer */
	uint32_t rdcp;
	/* [0x44] Rx Completion Ring Base Pointer [31:4] */
	uint32_t rcrbp_low;
	/* [0x48] Rx Completion Ring Base Pointer [63:32] */
	uint32_t rcrbp_high;
	/* [0x4c] Rx Completion Ring Head Pointer */
	uint32_t rcrhp;
	/*
	 * [0x50] RX Completion Ring Head Pointer internal.
	 * (Before the coalescing FIFO)
	 */
	uint32_t rcrhp_internal;
	/* [0x54] Completion controller configuration for the queue */
	uint32_t comp_cfg;
	/* [0x58] Completion controller configuration for the queue */
	uint32_t comp_cfg_2;
	/* [0x5c] Packet handler configuration */
	uint32_t pkt_cfg;
	/* [0x60] Queue QoS configuration */
	uint32_t qos_cfg;
	/* [0x64] DMB software control */
	uint32_t q_sw_ctrl;
	/* [0x68] Number of S2M Rx packets after completion */
	uint32_t q_rx_pkt;
	uint32_t rsrvd[997];
};
/** Complete S2M register file; bracketed offsets are relative to the S2M
 *  base address */
struct udma_s2m_regs {
	uint32_t rsrvd_0[64];
	struct udma_axi_s2m axi_s2m;		/* [0x100] */
	struct udma_s2m s2m;			/* [0x200] */
	struct udma_s2m_rd s2m_rd;		/* [0x300] */
	struct udma_s2m_wr s2m_wr;		/* [0x340] */
	struct udma_s2m_comp s2m_comp;		/* [0x380] */
	uint32_t rsrvd_1[80];
	struct udma_s2m_stat s2m_stat;		/* [0x500] */
	struct udma_s2m_feature s2m_feature;	/* [0x600] */
	uint32_t rsrvd_2[576];
	struct udma_s2m_q s2m_q[4];		/* [0x1000] */
};
/*
* Registers Fields
*/
/**** data_wr_cfg_1 register ****/
/* AXI write ID (AWID) */
#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWID_MASK 0x000000FF
#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWID_SHIFT 0
/* Cache Type */
#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWCACHE_MASK 0x000F0000
#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWCACHE_SHIFT 16
/* Burst type */
#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_MASK 0x03000000
#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_SHIFT 24
/**** data_wr_cfg_2 register ****/
/* User extension */
#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWUSER_MASK 0x000FFFFF
#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWUSER_SHIFT 0
/* Bus size, 128-bit */
#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWSIZE_MASK 0x00700000
#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWSIZE_SHIFT 20
/*
* AXI Master QoS.
* Used for arbitration between AXI masters
*/
#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWQOS_MASK 0x07000000
#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWQOS_SHIFT 24
/* Protection Type */
#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWPROT_MASK 0x70000000
#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWPROT_SHIFT 28
/**** desc_rd_cfg_4 register ****/
/* AXI read ID (ARID) */
#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARID_MASK 0x000000FF
#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARID_SHIFT 0
/* Cache Type */
#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARCACHE_MASK 0x000F0000
#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARCACHE_SHIFT 16
/* Burst type */
#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_MASK 0x03000000
#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_SHIFT 24
/**** desc_rd_cfg_5 register ****/
/* User extension */
#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARUSER_MASK 0x000FFFFF
#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARUSER_SHIFT 0
/* Bus size, 128-bit */
#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARSIZE_MASK 0x00700000
#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARSIZE_SHIFT 20
/*
* AXI Master QoS.
* Used for arbitration between AXI masters
*/
#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARQOS_MASK 0x07000000
#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARQOS_SHIFT 24
/* Protection Type */
#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARPROT_MASK 0x70000000
#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARPROT_SHIFT 28
/**** comp_wr_cfg_1 register ****/
/* AXI write ID (AWID) */
#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK 0x000000FF
#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_SHIFT 0
/* Cache Type */
#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK 0x000F0000
#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_SHIFT 16
/* Burst type */
#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK 0x03000000
#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT 24
/**** comp_wr_cfg_2 register ****/
/* User extension */
#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK 0x000FFFFF
#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_SHIFT 0
/* Bus size, 128-bit */
#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK 0x00700000
#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_SHIFT 20
/*
* AXI Master QoS.
* Used for arbitration between AXI masters
*/
#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK 0x07000000
#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_SHIFT 24
/* Protection Type */
#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK 0x70000000
#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_SHIFT 28
/**** data_wr_cfg register ****/
/*
* Defines the maximum number of AXI beats for a single AXI burst. This value is
* used for the burst split decision.
*/
#define UDMA_AXI_S2M_DATA_WR_CFG_MAX_AXI_BEATS_MASK 0x000000FF
#define UDMA_AXI_S2M_DATA_WR_CFG_MAX_AXI_BEATS_SHIFT 0
/**** desc_rd_cfg_3 register ****/
/*
* Defines the maximum number of AXI beats for a single AXI burst. This value is
* used for the burst split decision.
*/
#define UDMA_AXI_S2M_DESC_RD_CFG_3_MAX_AXI_BEATS_MASK 0x000000FF
#define UDMA_AXI_S2M_DESC_RD_CFG_3_MAX_AXI_BEATS_SHIFT 0
/*
* Enables breaking descriptor read request.
* Aligned to max_AXI_beats when the total read size is less than max_AXI_beats.
*/
#define UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16)
/**** desc_wr_cfg_1 register ****/
/*
* Defines the maximum number of AXI beats for a single AXI burst. This value is
* used for the burst split decision.
*/
#define UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK 0x000000FF
#define UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT 0
/*
* Minimum burst for writing completion descriptors.
* (AXI beats).
* Value must be aligned to cache lines (64 bytes).
* Default value is 2 cache lines, 8 beats.
*/
#define UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK 0x00FF0000
#define UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT 16
/**** ostand_cfg_rd register ****/
/*
* Maximum number of outstanding descriptor reads to the AXI.
* (AXI transactions).
*/
#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK 0x0000003F
#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_SHIFT 0
/* Maximum number of outstanding stream acknowledges. */
#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK 0x001F0000
#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_SHIFT 16
/**** ostand_cfg_wr register ****/
/*
* Maximum number of outstanding data writes to the AXI.
* (AXI transactions).
*/
#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK 0x0000003F
#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_SHIFT 0
/*
* Maximum number of outstanding data beats for data write to AXI.
* (AXI beats).
*/
#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK 0x0000FF00
#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_SHIFT 8
/*
* Maximum number of outstanding descriptor writes to the AXI.
* (AXI transactions).
*/
#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK 0x003F0000
#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_SHIFT 16
/*
* Maximum number of outstanding data beats for descriptor write to AXI.
* (AXI beats).
*/
#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000
#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_SHIFT 24
/**** state register ****/
#define UDMA_S2M_STATE_COMP_CTRL_MASK 0x00000003
#define UDMA_S2M_STATE_COMP_CTRL_SHIFT 0
#define UDMA_S2M_STATE_STREAM_IF_MASK 0x00000030
#define UDMA_S2M_STATE_STREAM_IF_SHIFT 4
#define UDMA_S2M_STATE_DATA_WR_CTRL_MASK 0x00000300
#define UDMA_S2M_STATE_DATA_WR_CTRL_SHIFT 8
#define UDMA_S2M_STATE_DESC_PREF_MASK 0x00003000
#define UDMA_S2M_STATE_DESC_PREF_SHIFT 12
#define UDMA_S2M_STATE_AXI_WR_DATA_MASK 0x00030000
#define UDMA_S2M_STATE_AXI_WR_DATA_SHIFT 16
/**** change_state register ****/
/* Start normal operation */
#define UDMA_S2M_CHANGE_STATE_NORMAL (1 << 0)
/* Stop normal operation */
#define UDMA_S2M_CHANGE_STATE_DIS (1 << 1)
/*
* Stop all machines.
* (Prefetch, scheduling, completion and stream interface)
*/
#define UDMA_S2M_CHANGE_STATE_ABORT (1 << 2)
/**** clear_err_log register ****/
/* Clear error log */
#define UDMA_S2M_CLEAR_ERR_LOG_CLEAR (1 << 0)
/**** s_data_fifo_status register ****/
/* FIFO used indication */
#define UDMA_S2M_S_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF
#define UDMA_S2M_S_DATA_FIFO_STATUS_USED_SHIFT 0
/* FIFO empty indication */
#define UDMA_S2M_S_DATA_FIFO_STATUS_EMPTY (1 << 24)
/* FIFO full indication */
#define UDMA_S2M_S_DATA_FIFO_STATUS_FULL (1 << 28)
/**** s_header_fifo_status register ****/
/* FIFO used indication */
#define UDMA_S2M_S_HEADER_FIFO_STATUS_USED_MASK 0x0000FFFF
#define UDMA_S2M_S_HEADER_FIFO_STATUS_USED_SHIFT 0
/* FIFO empty indication */
#define UDMA_S2M_S_HEADER_FIFO_STATUS_EMPTY (1 << 24)
/* FIFO full indication */
#define UDMA_S2M_S_HEADER_FIFO_STATUS_FULL (1 << 28)
/**** axi_data_fifo_status register ****/
/* FIFO used indication */
#define UDMA_S2M_AXI_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF
#define UDMA_S2M_AXI_DATA_FIFO_STATUS_USED_SHIFT 0
/* FIFO empty indication */
#define UDMA_S2M_AXI_DATA_FIFO_STATUS_EMPTY (1 << 24)
/* FIFO full indication */
#define UDMA_S2M_AXI_DATA_FIFO_STATUS_FULL (1 << 28)
/**** unack_fifo_status register ****/
/* FIFO used indication */
#define UDMA_S2M_UNACK_FIFO_STATUS_USED_MASK 0x0000FFFF
#define UDMA_S2M_UNACK_FIFO_STATUS_USED_SHIFT 0
/* FIFO empty indication */
#define UDMA_S2M_UNACK_FIFO_STATUS_EMPTY (1 << 24)
/* FIFO full indication */
#define UDMA_S2M_UNACK_FIFO_STATUS_FULL (1 << 28)
/**** indirect_ctrl register ****/
/* Selected queue for status read */
#define UDMA_S2M_INDIRECT_CTRL_Q_NUM_MASK 0x00000FFF
#define UDMA_S2M_INDIRECT_CTRL_Q_NUM_SHIFT 0
/**** sel_pref_fifo_status register ****/
/* FIFO used indication */
#define UDMA_S2M_SEL_PREF_FIFO_STATUS_USED_MASK 0x0000FFFF
#define UDMA_S2M_SEL_PREF_FIFO_STATUS_USED_SHIFT 0
/* FIFO empty indication */
#define UDMA_S2M_SEL_PREF_FIFO_STATUS_EMPTY (1 << 24)
/* FIFO full indication */
#define UDMA_S2M_SEL_PREF_FIFO_STATUS_FULL (1 << 28)
/**** sel_comp_fifo_status register ****/
/* FIFO used indication */
#define UDMA_S2M_SEL_COMP_FIFO_STATUS_USED_MASK 0x0000FFFF
#define UDMA_S2M_SEL_COMP_FIFO_STATUS_USED_SHIFT 0
/* Coalescing ACTIVE FSM state indication. */
#define UDMA_S2M_SEL_COMP_FIFO_STATUS_COAL_ACTIVE_STATE_MASK 0x00300000
#define UDMA_S2M_SEL_COMP_FIFO_STATUS_COAL_ACTIVE_STATE_SHIFT 20
/* FIFO empty indication */
#define UDMA_S2M_SEL_COMP_FIFO_STATUS_EMPTY (1 << 24)
/* FIFO full indication */
#define UDMA_S2M_SEL_COMP_FIFO_STATUS_FULL (1 << 28)
/**** stream_cfg register ****/
/*
* Disables the stream interface operation.
* Changing to 1 stops at the end of packet reception.
*/
#define UDMA_S2M_STREAM_CFG_DISABLE (1 << 0)
/*
* Flush the stream interface operation.
* Changing to 1 stops at the end of packet reception and assert ready to the
* stream I/F.
*/
#define UDMA_S2M_STREAM_CFG_FLUSH (1 << 4)
/* Stop descriptor prefetch when the stream is disabled and the S2M is idle. */
#define UDMA_S2M_STREAM_CFG_STOP_PREFETCH (1 << 8)
/**** desc_pref_cfg_1 register ****/
/*
* Size of the descriptor prefetch FIFO.
* (descriptors)
*/
#define UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK 0x000000FF
#define UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT 0
/**** desc_pref_cfg_2 register ****/
/* Enable promotion of the current queue in progress */
#define UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION (1 << 0)
/* Force promotion of the current queue in progress */
#define UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION (1 << 1)
/* Enable prefetch prediction of next packet in line. */
#define UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION (1 << 2)
/*
* Threshold for queue promotion.
* Queue is promoted for prefetch if there are less descriptors in the prefetch
* FIFO than the threshold
*/
#define UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK 0x0000FF00
#define UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_SHIFT 8
/*
* Force RR arbitration in the prefetch arbiter.
* 0 - Standard arbitration based on queue QoS
* 1 - Force round robin arbitration
*/
#define UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR (1 << 16)
/**** desc_pref_cfg_3 register ****/
/*
* Minimum descriptor burst size when prefetch FIFO level is below the
* descriptor prefetch threshold
* (must be 1)
*/
#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK 0x0000000F
#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT 0
/*
* Minimum descriptor burst size when prefetch FIFO level is above the
* descriptor prefetch threshold
*/
#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK 0x000000F0
#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT 4
/*
* Descriptor fetch threshold.
* Used as a threshold to determine the allowed minimum descriptor burst size.
* (Must be at least "max_desc_per_pkt")
*/
#define UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK 0x0000FF00
#define UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT 8
/**** desc_pref_cfg_4 register ****/
/*
* Used as a threshold for generating almost FULL indication to the application
*/
#define UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK 0x000000FF
#define UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_SHIFT 0
/**** data_cfg_1 register ****/
/*
* Maximum number of data beats in the data write FIFO.
* Defined based on data FIFO size
* (default FIFO size 512B 32 beats)
*/
#define UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK 0x000003FF
#define UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_SHIFT 0
/*
* Maximum number of packets in the data write FIFO.
* Defined based on header FIFO size
*/
#define UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK 0x00FF0000
#define UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_SHIFT 16
/*
* Internal use
* Data FIFO margin
*/
#define UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK 0xFF000000
#define UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_SHIFT 24
/**** data_cfg_2 register ****/
/*
* Drop timer.
* Waiting time for the host to write new descriptor to the queue
* (for the current packet in process)
*/
#define UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK 0x00FFFFFF
#define UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_SHIFT 0
/*
* Drop enable.
* Enable packet drop if there are no available descriptors in the system for
* this queue
*/
#define UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC (1 << 27)
/*
* Lack of descriptors hint.
* Generate interrupt when a packet is waiting but there are no available
* descriptors in the queue
*/
#define UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC (1 << 28)
/*
* Drop conditions
* Wait until a descriptor is available in the prefetch FIFO or the host before
* dropping packet.
* 1 - Drop if a descriptor is not available in the prefetch.
* 0 - Drop if a descriptor is not available in the system
*/
#define UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF (1 << 29)
/*
* DRAM write optimization
* 0 - Data write with byte enable
* 1 - Data write is always in Full AXI bus width (128 bit)
*/
#define UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE (1 << 30)
/*
 * Direct data write address
 * 1 - Use buffer 1 instead of buffer 2 when direct data placement is used with
 * header split.
 * 0 - Use buffer 2 for the header.
 * Note: bit 31 must be formed from an unsigned literal; (1 << 31) shifts into
 * the sign bit of a signed int, which is undefined behavior in C.
 */
#define UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1 (1U << 31)
/**** cfg_1c register ****/
/*
* Completion descriptor size.
* (words)
*/
#define UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK 0x0000000F
#define UDMA_S2M_COMP_CFG_1C_DESC_SIZE_SHIFT 0
/*
* Completion queue counter configuration.
* Completion FIFO in use counter measured in words or descriptors
* 1 - Words
* 0 - Descriptors
*/
#define UDMA_S2M_COMP_CFG_1C_CNT_WORDS (1 << 8)
/*
* Enable promotion of the current queue in progress in the completion write
* scheduler.
*/
#define UDMA_S2M_COMP_CFG_1C_Q_PROMOTION (1 << 12)
/* Force RR arbitration in the completion arbiter */
#define UDMA_S2M_COMP_CFG_1C_FORCE_RR (1 << 16)
/* Minimum number of free completion entries to qualify for promotion */
#define UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK 0xF0000000
#define UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_SHIFT 28
/**** cfg_2c register ****/
/*
* Completion FIFO size.
* (words per queue)
*/
#define UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK 0x00000FFF
#define UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_SHIFT 0
/*
* Unacknowledged FIFO size.
* (descriptors)
*/
#define UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK 0x0FFF0000
#define UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT 16
/**** reg_1 register ****/
/*
* Descriptor prefetch FIFO size
* (descriptors)
*/
#define UDMA_S2M_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_MASK 0x000000FF
#define UDMA_S2M_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_SHIFT 0
/**** reg_3 register ****/
/*
* Maximum number of data beats in the data write FIFO.
* Defined based on data FIFO size
* (default FIFO size 512B 32 beats)
*/
#define UDMA_S2M_FEATURE_REG_3_DATA_FIFO_DEPTH_MASK 0x000003FF
#define UDMA_S2M_FEATURE_REG_3_DATA_FIFO_DEPTH_SHIFT 0
/*
* Maximum number of packets in the data write FIFO.
* Defined based on header FIFO size
*/
#define UDMA_S2M_FEATURE_REG_3_DATA_WR_MAX_PKT_LIMIT_MASK 0x00FF0000
#define UDMA_S2M_FEATURE_REG_3_DATA_WR_MAX_PKT_LIMIT_SHIFT 16
/**** reg_4 register ****/
/*
* Completion FIFO size.
* (words per queue)
*/
#define UDMA_S2M_FEATURE_REG_4_COMP_FIFO_DEPTH_MASK 0x00000FFF
#define UDMA_S2M_FEATURE_REG_4_COMP_FIFO_DEPTH_SHIFT 0
/*
* Unacknowledged FIFO size.
* (descriptors)
*/
#define UDMA_S2M_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_MASK 0x0FFF0000
#define UDMA_S2M_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_SHIFT 16
/**** reg_5 register ****/
/* Maximum number of outstanding data writes to the AXI */
#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_MASK 0x0000003F
#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_SHIFT 0
/*
* Maximum number of outstanding data beats for data write to AXI.
* (AXI beats)
*/
#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_BEATS_WR_OSTAND_MASK 0x0000FF00
#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_BEATS_WR_OSTAND_SHIFT 8
/*
* Maximum number of outstanding descriptor reads to the AXI.
* (AXI transactions)
*/
#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_MASK 0x003F0000
#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_SHIFT 16
/*
* Maximum number of outstanding data beats for descriptor write to AXI.
* (AXI beats)
*/
#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000
#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_SHIFT 24
/**** reg_6 register ****/
/* Maximum number of outstanding descriptor reads to the AXI */
#define UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_MASK 0x0000003F
#define UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_SHIFT 0
/* Maximum number of outstanding stream acknowledges */
#define UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_MASK 0x001F0000
#define UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_SHIFT 16
/**** cfg register ****/
/*
* Configure the AXI AWCACHE
* for header write.
*/
#define UDMA_S2M_Q_CFG_AXI_AWCACHE_HDR_MASK 0x0000000F
#define UDMA_S2M_Q_CFG_AXI_AWCACHE_HDR_SHIFT 0
/*
* Configure the AXI AWCACHE
* for data write.
*/
#define UDMA_S2M_Q_CFG_AXI_AWCACHE_DATA_MASK 0x000000F0
#define UDMA_S2M_Q_CFG_AXI_AWCACHE_DATA_SHIFT 4
/*
* Enable operation of this queue.
* Start prefetch.
*/
#define UDMA_S2M_Q_CFG_EN_PREF (1 << 16)
/* Enables the reception of packets from the stream to this queue */
#define UDMA_S2M_Q_CFG_EN_STREAM (1 << 17)
/* Allow prefetch of less than minimum prefetch burst size. */
#define UDMA_S2M_Q_CFG_ALLOW_LT_MIN_PREF (1 << 20)
/*
* Configure the AXI AWCACHE
* for completion descriptor write
*/
#define UDMA_S2M_Q_CFG_AXI_AWCACHE_COMP_MASK 0x0F000000
#define UDMA_S2M_Q_CFG_AXI_AWCACHE_COMP_SHIFT 24
/*
* AXI QoS
* This value is used in AXI transactions associated with this queue and the
* prefetch and completion arbiters.
*/
#define UDMA_S2M_Q_CFG_AXI_QOS_MASK 0x70000000
#define UDMA_S2M_Q_CFG_AXI_QOS_SHIFT 28
/**** status register ****/
/* Indicates how many entries are used in the Queue */
#define UDMA_S2M_Q_STATUS_Q_USED_MASK 0x01FFFFFF
#define UDMA_S2M_Q_STATUS_Q_USED_SHIFT 0
/*
* prefetch status
* 0 prefetch operation is stopped
* 1 prefetch is operational
*/
#define UDMA_S2M_Q_STATUS_PREFETCH (1 << 28)
/*
* Queue receive status
* 0 -queue RX operation is stopped
* 1 RX queue is active and processing packets
*/
#define UDMA_S2M_Q_STATUS_RX (1 << 29)
/*
 * Indicates if the queue is full.
 * (Used by the host when head pointer equals tail pointer)
 * Note: bit 31 must be formed from an unsigned literal; (1 << 31) shifts into
 * the sign bit of a signed int, which is undefined behavior in C.
 */
#define UDMA_S2M_Q_STATUS_Q_FULL (1U << 31)
/*
* S2M Descriptor Ring Base address [31:4].
* Value of the base address of the S2M descriptor ring
* [3:0] - 0 - 16B alignment is enforced
* ([11:4] should be 0 for 4KB alignment)
*/
#define UDMA_S2M_Q_RDRBP_LOW_ADDR_MASK 0xFFFFFFF0
#define UDMA_S2M_Q_RDRBP_LOW_ADDR_SHIFT 4
/**** RDRL register ****/
/*
* Length of the descriptor ring.
* (descriptors)
* Associated with the ring base address ends at maximum burst size alignment
*/
#define UDMA_S2M_Q_RDRL_OFFSET_MASK 0x00FFFFFF
#define UDMA_S2M_Q_RDRL_OFFSET_SHIFT 0
/**** RDRHP register ****/
/*
* Relative offset of the next descriptor that needs to be read into the
* prefetch FIFO.
* Incremented when the DMA reads valid descriptors from the host memory to the
* prefetch FIFO.
* Note that this is the offset in # of descriptors and not in byte address.
*/
#define UDMA_S2M_Q_RDRHP_OFFSET_MASK 0x00FFFFFF
#define UDMA_S2M_Q_RDRHP_OFFSET_SHIFT 0
/* Ring ID */
#define UDMA_S2M_Q_RDRHP_RING_ID_MASK 0xC0000000
#define UDMA_S2M_Q_RDRHP_RING_ID_SHIFT 30
/**** RDRTP_inc register ****/
/*
* Increments the value in Q_RDRTP with the value written to this field in
* number of descriptors.
*/
#define UDMA_S2M_Q_RDRTP_INC_VAL_MASK 0x00FFFFFF
#define UDMA_S2M_Q_RDRTP_INC_VAL_SHIFT 0
/**** RDRTP register ****/
/*
* Relative offset of the next free descriptor in the host memory.
* Note that this is the offset in # of descriptors and not in byte address.
*/
#define UDMA_S2M_Q_RDRTP_OFFSET_MASK 0x00FFFFFF
#define UDMA_S2M_Q_RDRTP_OFFSET_SHIFT 0
/* Ring ID */
#define UDMA_S2M_Q_RDRTP_RING_ID_MASK 0xC0000000
#define UDMA_S2M_Q_RDRTP_RING_ID_SHIFT 30
/**** RDCP register ****/
/* Relative offset of the first descriptor in the prefetch FIFO. */
#define UDMA_S2M_Q_RDCP_OFFSET_MASK 0x00FFFFFF
#define UDMA_S2M_Q_RDCP_OFFSET_SHIFT 0
/* Ring ID */
#define UDMA_S2M_Q_RDCP_RING_ID_MASK 0xC0000000
#define UDMA_S2M_Q_RDCP_RING_ID_SHIFT 30
/*
* S2M Descriptor Ring Base address [31:4].
* Value of the base address of the S2M descriptor ring
* [3:0] - 0 - 16B alignment is enforced
* ([11:4] Must be 0 for 4KB alignment)
* NOTE:
* Length of the descriptor ring (in descriptors) associated with the ring base
* address ends at maximum burst size alignment
*/
#define UDMA_S2M_Q_RCRBP_LOW_ADDR_MASK 0xFFFFFFF0
#define UDMA_S2M_Q_RCRBP_LOW_ADDR_SHIFT 4
/**** RCRHP register ****/
/*
* Relative offset of the next descriptor that needs to be updated by the
* completion controller.
* Note: This is in descriptors and not in byte address.
*/
#define UDMA_S2M_Q_RCRHP_OFFSET_MASK 0x00FFFFFF
#define UDMA_S2M_Q_RCRHP_OFFSET_SHIFT 0
/* Ring ID */
#define UDMA_S2M_Q_RCRHP_RING_ID_MASK 0xC0000000
#define UDMA_S2M_Q_RCRHP_RING_ID_SHIFT 30
/**** RCRHP_internal register ****/
/*
* Relative offset of the next descriptor that needs to be updated by the
* completion controller.
* Note: This is in descriptors and not in byte address.
*/
#define UDMA_S2M_Q_RCRHP_INTERNAL_OFFSET_MASK 0x00FFFFFF
#define UDMA_S2M_Q_RCRHP_INTERNAL_OFFSET_SHIFT 0
/* Ring ID */
#define UDMA_S2M_Q_RCRHP_INTERNAL_RING_ID_MASK 0xC0000000
#define UDMA_S2M_Q_RCRHP_INTERNAL_RING_ID_SHIFT 30
/**** comp_cfg register ****/
/* Enables writing to the completion ring. */
#define UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE (1 << 0)
/* Disables the completion coalescing function. */
#define UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL (1 << 1)
/* Reserved */
#define UDMA_S2M_Q_COMP_CFG_FIRST_PKT_PROMOTION (1 << 2)
/*
* Buffer 2 location.
* Determines the position of the buffer 2 length in the S2M completion
* descriptor.
* 0 - WORD 1 [31:16]
* 1 - WORD 2 [31:16]
*/
#define UDMA_S2M_Q_COMP_CFG_BUF2_LEN_LOCATION (1 << 3)
/**** pkt_cfg register ****/
/* Header size. (bytes) */
#define UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK 0x0000FFFF
#define UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_SHIFT 0
/* Force header split */
#define UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT (1 << 16)
/* Enable header split. */
#define UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT (1 << 17)
/**** qos_cfg register ****/
/* Queue QoS */
#define UDMA_S2M_QOS_CFG_Q_QOS_MASK 0x000000FF
#define UDMA_S2M_QOS_CFG_Q_QOS_SHIFT 0
/* Reset the tail pointer hardware. */
#define UDMA_S2M_Q_SW_CTRL_RST_TAIL_PTR (1 << 1)
/* Reset the head pointer hardware. */
#define UDMA_S2M_Q_SW_CTRL_RST_HEAD_PTR (1 << 2)
/* Reset the current pointer hardware. */
#define UDMA_S2M_Q_SW_CTRL_RST_CURRENT_PTR (1 << 3)
/* Reset the prefetch FIFO */
#define UDMA_S2M_Q_SW_CTRL_RST_PREFETCH (1 << 4)
/* Reset the queue */
#define UDMA_S2M_Q_SW_CTRL_RST_Q (1 << 8)
#ifdef __cplusplus
}
#endif
#endif /* __AL_HAL_UDMA_S2M_REG_H */

View file

@ -0,0 +1,264 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @{
* @file al_hal_an_lt_wrapper_regs.h
*
 * @brief Auto-negotiation / link-training (AN/LT) wrapper registers
*
*/
#ifndef __AL_HAL_AN_LT_wrapper_REGS_H__
#define __AL_HAL_AN_LT_wrapper_REGS_H__
#include "al_hal_plat_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Unit Registers
*/
/*
 * General (per-wrapper) registers of the AN/LT wrapper unit.
 * Bracketed offsets are relative to the start of this sub-block; rsrvd pads
 * the block out to 0x40 bytes (16 32-bit registers total).
 */
struct al_an_lt_wrapper_gen {
/* [0x0] AN LT wrapper Version */
uint32_t version;
/* [0x4] AN LT general configuration */
uint32_t cfg;
uint32_t rsrvd[14];
};
/*
 * Per-instance AN/LT indirect access registers: an address/data pair for
 * reaching the AN/LT internal register file, plus control/status signals.
 * Bracketed offsets are relative to the start of this sub-block; rsrvd pads
 * the instance out to 0x20 bytes (8 32-bit registers total).
 */
struct al_an_lt_wrapper_an_lt {
/* [0x0] AN LT register file address */
uint32_t addr;
/* [0x4] PCS register file data */
uint32_t data;
/* [0x8] AN LT control signals */
uint32_t ctrl;
/* [0xc] AN LT status signals */
uint32_t status;
uint32_t rsrvd[4];
};
/*
 * Index of an AN/LT instance within al_an_lt_wrapper_regs.an_lt[],
 * one per supported SerDes bus width. Values are array indices fixed
 * by the hardware layout — do not renumber.
 */
enum al_eth_an_lt_unit {
AL_ETH_AN_LT_UNIT_32_BIT = 0,
AL_ETH_AN_LT_UNIT_20_BIT = 1,
AL_ETH_AN_LT_UNIT_16_BIT = 2,
};
/*
 * Top-level AN/LT wrapper register file overlay.
 * rsrvd_0[64] pads the first 0x100 bytes; the general block starts at 0x100
 * and the three AN/LT instances (indexed by enum al_eth_an_lt_unit) follow
 * at 0x140.
 */
struct al_an_lt_wrapper_regs {
uint32_t rsrvd_0[64];
struct al_an_lt_wrapper_gen gen; /* [0x100] */
struct al_an_lt_wrapper_an_lt an_lt[3]; /* [0x140] */
};
/*
* Registers Fields
*/
/**** version register ****/
/* Revision number (Minor) */
#define AN_LT_WRAPPER_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
#define AN_LT_WRAPPER_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0
/* Revision number (Major) */
#define AN_LT_WRAPPER_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
#define AN_LT_WRAPPER_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
/* Day of release (day of month) */
#define AN_LT_WRAPPER_GEN_VERSION_DATE_DAY_MASK 0x001F0000
#define AN_LT_WRAPPER_GEN_VERSION_DATE_DAY_SHIFT 16
/* Month of release */
#define AN_LT_WRAPPER_GEN_VERSION_DATA_MONTH_MASK 0x01E00000
#define AN_LT_WRAPPER_GEN_VERSION_DATA_MONTH_SHIFT 21
/* Year of release (starting from 2000) */
#define AN_LT_WRAPPER_GEN_VERSION_DATE_YEAR_MASK 0x3E000000
#define AN_LT_WRAPPER_GEN_VERSION_DATE_YEAR_SHIFT 25
/* Reserved */
#define AN_LT_WRAPPER_GEN_VERSION_RESERVED_MASK 0xC0000000
#define AN_LT_WRAPPER_GEN_VERSION_RESERVED_SHIFT 30
/**** cfg register ****/
/*
* selection between different bus widths:
* 0 16
* 1 20
* 2 32
* 3 N/A
*/
#define AN_LT_WRAPPER_GEN_CFG_AN_LT_SEL_RX_MASK 0x00000003
#define AN_LT_WRAPPER_GEN_CFG_AN_LT_SEL_RX_SHIFT 0
/*
* selection between different bus widths:
* 0 16
* 1 20
* 2 32
* 3 N/A
*/
#define AN_LT_WRAPPER_GEN_CFG_AN_LT_SEL_TX_MASK 0x0000000C
#define AN_LT_WRAPPER_GEN_CFG_AN_LT_SEL_TX_SHIFT 2
/* bypass the AN/LT block */
#define AN_LT_WRAPPER_GEN_CFG_BYPASS_RX (1 << 4)
/* bypass the AN/LT block */
#define AN_LT_WRAPPER_GEN_CFG_BYPASS_TX (1 << 5)
/**** addr register ****/
/* Address value */
#define AN_LT_WRAPPER_AN_LT_ADDR_VAL_MASK 0x000007FF
#define AN_LT_WRAPPER_AN_LT_ADDR_VAL_SHIFT 0
/**** data register ****/
/* Data value */
#define AN_LT_WRAPPER_AN_LT_DATA_VAL_MASK 0x0000FFFF
#define AN_LT_WRAPPER_AN_LT_DATA_VAL_SHIFT 0
/**** ctrl register ****/
/*
* Default Auto-Negotiation Enable. If 1, the auto-negotiation process will
* start after reset de-assertion. The application can also start the
* auto-negotiation process by writing the KXAN_CONTROL.an_enable bit with 1.
* Important: This signal is OR'ed with the KXAN_CONTROL.an_enable bit. Hence,
* when asserted (1) the application is unable to disable autonegotiation and
* writing the an_enable bit has no effect.
* Note: Even if enabled by this pin, the application must write the correct
* abilities in the KXAN_ABILITY_1/2/3 registers within 60ms from reset
* deassertion (break_link_timer).
*/
#define AN_LT_WRAPPER_AN_LT_CTRL_AN_ENA (1 << 0)
/*
* If set to 1, the Arbitration State Machine reached the TRANSMIT_DISABLE
* state.
*/
#define AN_LT_WRAPPER_AN_LT_CTRL_AN_DIS_TIMER (1 << 1)
#define AN_LT_WRAPPER_AN_LT_CTRL_LINK_STATUS_KX (1 << 4)
#define AN_LT_WRAPPER_AN_LT_CTRL_LINK_STATUS_KX4 (1 << 5)
#define AN_LT_WRAPPER_AN_LT_CTRL_LINK_STATUS (1 << 6)
/*
* PHY LOS indication selection
* 0 - Select input from the SerDes
* 1 - Select register value from phy_los_in_def
*/
#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_IN_SEL (1 << 8)
/* PHY LOS default value */
#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_IN_DEF (1 << 9)
/* PHY LOS polarity */
#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_IN_POL (1 << 10)
/*
* PHY LOS indication selection
* 0 select AN output
* 1 - Select register value from phy_los_out_def
* 2 - Select input from the SerDes
* 3 0
*/
#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_OUT_SEL_MASK 0x00003000
#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_OUT_SEL_SHIFT 12
/* PHY LOS default value */
#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_OUT_DEF (1 << 14)
/* PHY LOS polarity */
#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_OUT_POL (1 << 15)
/**** status register ****/
/* Auto-Negotiation Done. If 1, the auto-negotiation process has completed. */
#define AN_LT_WRAPPER_AN_LT_STATUS_AN_DONE (1 << 0)
/*
* If set to 1, auto-negotiation is enabled on the link. It represents the
* enable control bit KXAN_CONTROL.an_enable. When set to 1, the signals
* an_status/an_select are valid.
*/
#define AN_LT_WRAPPER_AN_LT_STATUS_AN_VAL (1 << 1)
/*
* If set to 0, auto-negotiation is in progress, if set to 1, the Arbitration
* State Machine reached the AN_GOOD_CHECK state (i.e. before autonegotiation is
* done, but the link no longer is used to transfer DME pages). Stays asserted
* also during AN_GOOD (autoneg done).
*/
#define AN_LT_WRAPPER_AN_LT_STATUS_AN_STATUS (1 << 2)
/*
* Selected Technology. Becomes valid when an_status is 1.
* The selection mode number (from 0 to 24) corresponds to the Technology
* Ability (A0-A24) from the ability pages (see 4.3.2.3 page 13). The mode
* selection is based on the matching technology abilities and priority.
* A value of 31 is an invalid setting that indicates that no common technology
* could be resolved. The application should then inspect the base page results
* to determine if the link is operable or not.
*/
#define AN_LT_WRAPPER_AN_LT_STATUS_AN_SELECT_MASK 0x000001F0
#define AN_LT_WRAPPER_AN_LT_STATUS_AN_SELECT_SHIFT 4
/*
* If set to 1, the Arbitration State Machine reached the TRANSMIT_DISABLE state
*/
#define AN_LT_WRAPPER_AN_LT_STATUS_AN_TR_DIS_STATUS (1 << 16)
/*
* FEC Enable. Asserts when autonegotiation base page exchange identified both
* link partners advertising FEC capability and at least one is requesting FEC.
* The signal stays constant following base page exchange until autonegotiation
* is disabled or restarted.
* Note: the information can also be extracted from the base page exchange or
* the BP_ETH_STATUS register.
*/
#define AN_LT_WRAPPER_AN_LT_STATUS_FEC_ENA (1 << 17)
/*
* Link Training Frame Lock. If set to 1 the training frame delineation has been
* acquired.
*/
#define AN_LT_WRAPPER_AN_LT_STATUS_LT_LOCK (1 << 20)
/*
* If set to 0, link-training is in progress, if set to 1, the training is
* completed and the PCS datapath has been enabled (phy_los_out no longer
* gated).
*/
#define AN_LT_WRAPPER_AN_LT_STATUS_LT_STATUS (1 << 21)
/*
* If set to 1, link-training is enabled on the link. It represents the enable
 * control bit PMD Control.training enable. When set to 1, the signal lt_status
* is valid
*/
#define AN_LT_WRAPPER_AN_LT_STATUS_LT_VAL (1 << 22)
#ifdef __cplusplus
}
#endif
#endif /* __AL_HAL_AN_LT_wrapper_REGS_H__ */
/** @} end of AN/LT wrapper registers group */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,95 @@
/*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @defgroup group_eth_alu_api API
* Ethernet Controller generic ALU API
* @ingroup group_eth
* @{
* @file al_hal_eth_alu.h
*
* @brief Header file for control parameters for the generic ALU unit in the Ethernet Datapath for Advanced Ethernet port.
*
*/
#ifndef __AL_HAL_ETH_ALU_H__
#define __AL_HAL_ETH_ALU_H__
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
/**
 * Opcodes for the generic ALU unit in the Ethernet datapath.
 *
 * The value encoding (evident from the constants below) is:
 * bits [3:0] select the base operation, bit 4 (16) inverts operand A,
 * bit 5 (32) inverts operand B.
 */
enum AL_ETH_ALU_OPCODE
{
	/* Base operations on A and B */
	AL_ALU_FWD_A					= 0, /* result = A */
	AL_ALU_ARITHMETIC_ADD				= 1,
	AL_ALU_ARITHMETIC_SUBTRACT			= 2,
	AL_ALU_BITWISE_AND				= 3,
	AL_ALU_BITWISE_OR				= 4,
	AL_ALU_SHIFT_RIGHT_A_BY_B			= 5,
	AL_ALU_SHIFT_LEFT_A_BY_B			= 6,
	AL_ALU_BITWISE_XOR				= 7,
	/* Same operations with operand A inverted (base + 16) */
	AL_ALU_FWD_INV_A				= 16,
	AL_ALU_ARITHMETIC_ADD_INV_A_AND_B		= 17,
	AL_ALU_ARITHMETIC_SUBTRACT_INV_A_AND_B		= 18,
	AL_ALU_BITWISE_AND_INV_A_AND_B			= 19,
	AL_ALU_BITWISE_OR_INV_A_AND_B			= 20,
	AL_ALU_SHIFT_RIGHT_INV_A_BY_B			= 21,
	AL_ALU_SHIFT_LEFT_INV_A_BY_B			= 22,
	AL_ALU_BITWISE_XOR_INV_A_AND_B			= 23,
	/* Same operations with operand B inverted (base + 32) */
	AL_ALU_ARITHMETIC_ADD_A_AND_INV_B		= 33,
	AL_ALU_ARITHMETIC_SUBTRACT_A_AND_INV_B		= 34,
	AL_ALU_BITWISE_AND_A_AND_INV_B			= 35,
	AL_ALU_BITWISE_OR_A_AND_INV_B			= 36,
	AL_ALU_SHIFT_RIGHT_A_BY_INV_B			= 37,
	AL_ALU_SHIFT_LEFT_A_BY_INV_B			= 38,
	AL_ALU_BITWISE_XOR_A_AND_INV_B			= 39,
	/* Same operations with both operands inverted (base + 48) */
	AL_ALU_ARITHMETIC_ADD_INV_A_AND_INV_B		= 49,
	/* Legacy truncated name, kept for source compatibility */
	AL_ALU_ARITHMETIC_SUBTRACT_INV_A_AND		= 50,
	/*
	 * Alias following the consistent _INV_A_AND_INV_B naming pattern
	 * used by every other both-operands-inverted opcode; same value as
	 * the truncated enumerator above.
	 */
	AL_ALU_ARITHMETIC_SUBTRACT_INV_A_AND_INV_B	= 50,
	AL_ALU_BITWISE_AND_INV_A_AND_INV_B		= 51,
	AL_ALU_BITWISE_OR_INV_A_AND_INV_B		= 52,
	AL_ALU_SHIFT_RIGHT_INV_A_BY_INV_B		= 53,
	AL_ALU_SHIFT_LEFT_INV_A_BY_INV_B		= 54,
	AL_ALU_BITWISE_XOR_INV_A_AND_INV_B		= 55
};
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
#endif /* __AL_HAL_ETH_ALU_H__ */
/** @} end of Ethernet group */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,372 @@
/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @defgroup group_eth_kr_api API
* Ethernet KR auto-neg and link-training driver API
* @ingroup group_eth
* @{
* @file al_hal_eth_kr.h
*
* @brief Header file for KR driver
*
*
*/
#ifndef __AL_HAL_ETH_KR_H__
#define __AL_HAL_ETH_KR_H__
#include "al_hal_eth.h"
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
/* AN (Auto-negotiation) Advertisement Registers */
struct al_eth_an_adv {
	/* Constant value defining 802.3ap support.
	 * The suggested value is 0x01. */
	uint8_t selector_field;
	/* Contains arbitrary data. */
	uint8_t echoed_nonce;
	/* Pause capability. */
	uint8_t capability;
	/* Set to 1 to indicate a Remote Fault condition.
	 * Set to 0 to indicate normal operation. */
	uint8_t remote_fault;
	/* Should always be set to 0. */
	uint8_t acknowledge;
	/* Set to 1 to indicate that the device has next pages to send.
	 * Set to 0 to indicate that the device has no next pages to send. */
	uint8_t next_page;
	/* Must be set to an arbitrary value.
	 * Two devices must have a different nonce for autonegotiation to
	 * operate (a loopback will not allow autonegotiation to complete). */
	uint8_t transmitted_nonce;
	/* Technology ability field: bitwise OR of the AL_ETH_AN_TECH_*
	 * values below, advertising the supported link modes. */
	uint32_t technology;
#define AL_ETH_AN_TECH_1000BASE_KX AL_BIT(0)
#define AL_ETH_AN_TECH_10GBASE_KX4 AL_BIT(1)
#define AL_ETH_AN_TECH_10GBASE_KR AL_BIT(2)
#define AL_ETH_AN_TECH_40GBASE_KR4 AL_BIT(3)
#define AL_ETH_AN_TECH_40GBASE_CR4 AL_BIT(4)
#define AL_ETH_AN_TECH_100GBASE_CR AL_BIT(5)
	/* FEC capability field.
	 * NOTE(review): bit semantics are presumably the 802.3ap FEC
	 * ability/request bits — confirm against the HAL implementation. */
	uint8_t fec_capability;
};
/* AN next page fields (auto-negotiation next-page exchange) */
struct al_eth_an_np {
	/* These bits can be used as message code field or unformatted code field.
	 * When msg_page is true, these bits represent the message code field.
	 * Predefined message code field values should be used as specified in
	 * the standard 802.3ap.
	 * For the null message code the value is 0x01.
	 */
	uint16_t unformatted_code_field;
	/* Flag to keep track of the state of the local device's Toggle bit.
	 * Initial value is taken from the base page. Set to 0.
	 */
	al_bool toggle;
	/* Acknowledge 2 is used to indicate that the receiver is able to act on
	 * the information (or perform the task) defined in the message.
	 */
	al_bool ack2;
	/* AL_TRUE when this is a message page (unformatted_code_field is a
	 * message code); AL_FALSE for an unformatted page. */
	al_bool msg_page;
	/* If the device does not have any more Next Pages to send, set to AL_FALSE */
	al_bool next_page;
	/* Remaining unformatted code words of the page. */
	uint16_t unformatted_code_field1;
	uint16_t unformatted_code_field2;
};
/* Coefficient status values exchanged during KR (clause 72-style)
 * link training: the state reported for a single equalizer coefficient. */
enum al_eth_kr_cl72_cstate {
	C72_CSTATE_NOT_UPDATED = 0,	/* coefficient has not been updated */
	C72_CSTATE_UPDATED = 1,		/* coefficient was updated */
	C72_CSTATE_MIN = 2,		/* coefficient reached its minimum */
	C72_CSTATE_MAX = 3,		/* coefficient reached its maximum */
};
/* Coefficient update operations requested during KR link training
 * for a single equalizer coefficient. */
enum al_eth_kr_cl72_coef_op {
	AL_PHY_KR_COEF_UP_HOLD = 0,	/* hold the current value */
	AL_PHY_KR_COEF_UP_INC = 1,	/* increment the coefficient */
	AL_PHY_KR_COEF_UP_DEC = 2,	/* decrement the coefficient */
	AL_PHY_KR_COEF_UP_RESERVED = 3	/* reserved encoding */
};
/* Coefficient update request message used in the link-training handshake:
 * one requested operation per coefficient, plus the preset/initialize
 * control bits. */
struct al_eth_kr_coef_up_data {
	/* NOTE(review): c_zero/c_plus/c_minus presumably map to the 802.3ap
	 * c(0)/c(+1)/c(-1) taps — confirm against the implementation. */
	enum al_eth_kr_cl72_coef_op c_zero;
	enum al_eth_kr_cl72_coef_op c_plus;
	enum al_eth_kr_cl72_coef_op c_minus;
	al_bool preset;		/* request coefficient preset */
	al_bool initialize;	/* request coefficient initialize */
};
/* Status report message used in the link-training handshake: the state of
 * each coefficient plus the receiver-ready indication. */
struct al_eth_kr_status_report_data {
	enum al_eth_kr_cl72_cstate c_zero;
	enum al_eth_kr_cl72_cstate c_plus;
	enum al_eth_kr_cl72_cstate c_minus;
	/* AL_TRUE once link training has completed
	 * (see al_eth_receiver_ready_set) */
	al_bool receiver_ready;
};
/* Lane selector for the AN (auto-negotiation) / LT (link-training) API.
 * NOTE(review): the double underscore in AL_ETH_AN__LT_LANE_* is the
 * established spelling ("AN" + "LT"); callers depend on it, do not rename. */
enum al_eth_an_lt_lane {
	AL_ETH_AN__LT_LANE_0,
	AL_ETH_AN__LT_LANE_1,
	AL_ETH_AN__LT_LANE_2,
	AL_ETH_AN__LT_LANE_3,
};
/**
 * get the last received coefficient update message from the link partner
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 * @param lpcoeff coeff update received
 */
void al_eth_lp_coeff_up_get(
		struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane,
		struct al_eth_kr_coef_up_data *lpcoeff);

/**
 * get the last received status report message from the link partner
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 * @param status status report received
 */
void al_eth_lp_status_report_get(
		struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane,
		struct al_eth_kr_status_report_data *status);

/**
 * set the coefficient data for the next message that will be sent to lp
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 * @param ldcoeff coeff update to send
 */
void al_eth_ld_coeff_up_set(
		struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane,
		struct al_eth_kr_coef_up_data *ldcoeff);

/**
 * set the status report message for the next message that will be sent to lp
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 * @param status status report to send
 */
void al_eth_ld_status_report_set(
		struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane,
		struct al_eth_kr_status_report_data *status);

/**
 * get the receiver frame lock status
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 *
 * @return true if Training frame delineation is detected, otherwise false.
 */
al_bool al_eth_kr_receiver_frame_lock_get(struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane);

/**
 * get the start up protocol progress status
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 *
 * @return true if the startup protocol is in progress.
 */
al_bool al_eth_kr_startup_proto_prog_get(struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane);

/**
 * indicate the receiver is ready (the link training is completed)
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 */
void al_eth_receiver_ready_set(struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane);

/**
 * read Training failure status.
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 *
 * @return true if Training failure has been detected.
 */
al_bool al_eth_kr_training_status_fail_get(struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane);
/****************************** auto negotiation *******************************/
/**
 * Initialize Auto-negotiation
 * - Program Ability Registers (Advertisement Registers)
 * - Clear Status latches
 *
 * @param adapter pointer to the private structure
 * @param an_adv pointer to the AN Advertisement Registers structure
 *        when NULL, the registers will not be updated.
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_kr_an_init(struct al_hal_eth_adapter *adapter,
		struct al_eth_an_adv *an_adv);

/**
 * Enable/Restart Auto-negotiation
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 * @param next_page_enable enable the AN next-page exchange
 *        (see al_eth_kr_next_page_write/al_eth_kr_next_page_read)
 * @param lt_enable initialize link training as well
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_kr_an_start(struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane,
		al_bool next_page_enable,
		al_bool lt_enable);

/**
 * Write the AN next page to be transmitted to the link partner.
 *
 * @param adapter pointer to the private structure
 * @param np next page fields to send
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_kr_next_page_write(struct al_hal_eth_adapter *adapter,
		struct al_eth_an_np *np);

/**
 * Read the AN next page received from the link partner.
 *
 * @param adapter pointer to the private structure
 * @param np filled with the received next page fields
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_kr_next_page_read(struct al_hal_eth_adapter *adapter,
		struct al_eth_an_np *np);

/**
 * Stop Auto-negotiation
 *
 * Stopping the auto-negotiation will prevent the mac from sending the last page
 * to the link partner in case it starts the AN again. It must be called after
 * link training is completed or the software will lose sync with the HW state
 * machine.
 *
 * @param adapter pointer to the private structure
 */
void al_eth_kr_an_stop(struct al_hal_eth_adapter *adapter);

/**
 * Check Auto-negotiation event done
 *
 * @param adapter pointer to the private structure
 * @param page_received Set to true if the AN page received indication is set.
 *        Set to false otherwise.
 * @param an_completed Set to true if the AN completed indication is set.
 *        Set to false otherwise.
 * @param error Set to true if any error encountered
 */
void al_eth_kr_an_status_check(struct al_hal_eth_adapter *adapter,
		al_bool *page_received,
		al_bool *an_completed,
		al_bool *error);

/**
 * Read the remote auto-negotiation advertising.
 * This function is safe to call after al_eth_kr_an_status_check returned
 * with page_received set.
 *
 * @param adapter pointer to the private structure
 * @param an_adv pointer to the AN Advertisement Registers structure
 */
void al_eth_kr_an_read_adv(struct al_hal_eth_adapter *adapter,
		struct al_eth_an_adv *an_adv);
/****************************** link training **********************************/
/**
 * Initialize Link-training.
 * Clear the status register and set the local coefficient update and status
 * to zero.
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 */
void al_eth_kr_lt_initialize(struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane);

/**
 * Wait for frame lock.
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 * @param timeout timeout in usec.
 *
 * @return true if frame lock received. false otherwise.
 */
al_bool al_eth_kr_lt_frame_lock_wait(struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane,
		uint32_t timeout);

/**
 * reset the 10GBase-KR startup protocol and begin its operation
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 */
void al_eth_kr_lt_restart(struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane);

/**
 * reset the 10GBase-KR startup protocol and end its operation
 *
 * @param adapter pointer to the private structure
 * @param lane lane number
 */
void al_eth_kr_lt_stop(struct al_hal_eth_adapter *adapter,
		enum al_eth_an_lt_lane lane);
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
#endif /*__AL_HAL_ETH_KR_H__*/
/** @} end of Ethernet kr group */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff