Data Structures | Defines | Typedefs | Enumerations | Functions

TI specific API

Implementation specific declarations. More...

Data Structures

struct  ti_em_buffer_config_t_
struct  ti_em_chain_config_t_
struct  ti_em_chain_rio_config_t_
struct  ti_em_chain_xge_config_t_
struct  ti_em_config_t_
struct  ti_em_device_rio_route_t_
struct  ti_em_device_xge_route_t_
struct  ti_em_iterator_t_
struct  ti_em_pair_t_
struct  ti_em_pool_config_t_
struct  ti_em_poststore_config_t_
struct  ti_em_preload_config_t_
struct  ti_em_process_route_t_

Defines

#define TI_EM_AP_PRIVATE_EVENT_NUM   (256u)
#define TI_EM_BUF_MODE_LOOSE   (1)
#define TI_EM_BUF_MODE_TIGHT   (0)
#define TI_EM_BUFFER_POOL_ID_NUM   (256u)
#define TI_EM_CD_PRIVATE_EVENT_NUM   (64u)
#define TI_EM_CHAIN_DISABLED   (0u)
#define TI_EM_CHAIN_ENABLED   (1u)
#define TI_EM_CHAIN_TX_QUEUE_NUM   (4u)
#define TI_EM_CHAINING_PKTDMA   (0)
#define TI_EM_COH_MODE_OFF   (0x0)
#define TI_EM_COH_MODE_ON   (0x1)
#define TI_EM_COH_MODE_RESERVED0   (0x2)
#define TI_EM_COH_MODE_RESERVED1   (0x3)
#define TI_EM_CORE_NUM   (8u)
#define TI_EM_DEVICE_NUM   (256)
#define TI_EM_DMA_QUEUE_NUM   (TI_EM_CORE_NUM + 2)
#define TI_EM_EO_NUM_MAX   (TI_EM_QUEUE_NUM_MAX)
#define TI_EM_EVENT_GROUP_NUM_MAX   (16384u)
#define TI_EM_EVENT_TYPE_PRELOAD_MSK   (0xC0)
#define TI_EM_EVENT_TYPE_PRELOAD_OFF   (0u)
#define TI_EM_EVENT_TYPE_PRELOAD_ON_SIZE_A   (1u<<6)
#define TI_EM_EVENT_TYPE_PRELOAD_ON_SIZE_B   (2u<<6)
#define TI_EM_EVENT_TYPE_PRELOAD_ON_SIZE_C   (3u<<6)
#define TI_EM_HW_QUEUE_NUM
#define TI_EM_HW_QUEUE_STEP   (32u)
#define TI_EM_INTERRUPT_DISABLE   (0)
#define TI_EM_ITERATOR_WSIZE   (8u)
#define TI_EM_PDSP_GLOBAL_DATA_SIZE   (8192u)
#define TI_EM_PF_LEN   (1u)
#define TI_EM_POOL_NUM   (32u)
#define TI_EM_PRELOAD_DISABLED   (0u)
#define TI_EM_PRELOAD_ENABLED   (1u)
#define TI_EM_PRIO_NUM   (EM_QUEUE_PRIO_HIGHEST + 1)
#define TI_EM_PRIVATE_EVENT_DSC_SIZE   (16u)
#define TI_EM_PROCESS_NUM   (2u)
#define TI_EM_PUSH_POLICY_HEAD   (0x1)
#define TI_EM_PUSH_POLICY_TAIL   (0x0)
#define TI_EM_QUEUE_GROUP_NUM_MAX   (TI_EM_PRIO_NUM * TI_EM_CORE_NUM)
#define TI_EM_QUEUE_MODE_HW   (1u)
#define TI_EM_QUEUE_MODE_SD   (0u)
#define TI_EM_QUEUE_NUM_IN_SET   (8u)
#define TI_EM_QUEUE_NUM_MAX   (16384u)
#define TI_EM_QUEUE_SET_NUM   (256u)
#define TI_EM_SCHEDULER_THREAD_NUM   (4u)
#define TI_EM_SLOT_SPCB   (0)
#define TI_EM_STATIC_QUEUE_NUM_IN_SET   (2u)
#define TI_EM_STATIC_QUEUE_NUM_MAX   (256u)
#define TI_EM_STREAM_NUM   (256u)
#define TI_EM_TSCOPE_ALLOC   (1)
#define TI_EM_TSCOPE_ATOMIC_PROCESSING_END   (6)
#define TI_EM_TSCOPE_CLAIM_LOCAL   (5)
#define TI_EM_TSCOPE_DISPATCH   (3)
#define TI_EM_TSCOPE_FREE   (7)
#define TI_EM_TSCOPE_PRESCHEDULE   (4)
#define TI_EM_TSCOPE_SEND   (2)
#define TI_EM_XGE_CHAIN_HEADER_SIZE   (8u)
#define TI_EM_XGE_CRC_SIZE   (4)
#define TI_EM_XGE_ENET_HEADER_SIZE   (18u)
#define TI_EM_XGE_FRAME_SIZE_MIN   (64u)
#define TI_EM_XGE_HEADER_SIZE   (TI_EM_XGE_ENET_HEADER_SIZE + TI_EM_XGE_CHAIN_HEADER_SIZE)
#define TI_EM_XGE_PAYLOAD_SIZE_MIN   (TI_EM_XGE_FRAME_SIZE_MIN - TI_EM_XGE_HEADER_SIZE - TI_EM_XGE_CRC_SIZE )
#define TI_EM_XGE_RX_FRAGMENT_SIZE_MIN   (TI_EM_XGE_PAYLOAD_SIZE_MIN + TI_EM_XGE_CRC_SIZE)
#define TI_EM_XGE_RX_HEADER_SIZE   (TI_EM_XGE_HEADER_SIZE)
#define TI_EM_XGE_RX_MISS_QUEUE_NUM   (TI_EM_XGE_VLAN_PRIO_NUM)
#define TI_EM_XGE_TX_DIVERT_QUEUE_NUM   (32u)
#define TI_EM_XGE_TX_FRAGMENT_SIZE   0
#define TI_EM_XGE_TX_HEADER_SIZE   (TI_EM_XGE_HEADER_SIZE + TI_EM_XGE_PAYLOAD_SIZE_MIN)
#define TI_EM_XGE_TX_QUEUE_NUM   (1u)
#define TI_EM_XGE_VLAN_PRIO_NUM   (8u)

Typedefs

typedef uint32_t ti_em_buf_mode_t
typedef uint32_t ti_em_coh_mode_t
typedef struct ti_em_config_t_ ti_em_config_t
typedef enum ti_em_counter_type_e ti_em_counter_type_e
typedef uint32_t ti_em_counter_type_t
typedef uint32_t ti_em_counter_value_t
typedef uint32_t ti_em_destination_id_t
typedef uint8_t ti_em_device_id_t
typedef uint32_t ti_em_dma_id_t
typedef uint32_t ti_em_flow_id_t
typedef void(* ti_em_free_func_t )(void *buffer_ptr, size_t buffer_size)
typedef uint8_t ti_em_interrupt_id_t
typedef uint32_t ti_em_packet_t
typedef uint8_t ti_em_pdsp_id_t
typedef uint8_t ti_em_process_id_t
typedef uint32_t ti_em_process_type_t
typedef uint32_t ti_em_push_policy_t
typedef uint16_t ti_em_queue_id_t
typedef uint8_t ti_em_queue_mode_t
typedef uint32_t ti_em_sem_id_t
typedef uint32_t ti_em_stream_id_t
typedef em_status_t(* ti_em_trace_handler_t )(ti_em_tscope_t tscope,...)
typedef uint32_t ti_em_tscope_t
typedef enum ti_em_xge_rx_miss_type_e ti_em_xge_rx_miss_type_e
typedef uint32_t ti_em_xge_rx_miss_type_t

Enumerations

enum  ti_em_counter_type_e
enum  ti_em_xge_rx_miss_type_e

Functions

em_event_t ti_em_alloc_local (size_t size, em_event_type_t type)
em_event_t ti_em_alloc_with_buffers (em_event_type_t event_type, em_pool_id_t pool_id, int32_t buffer_num, ti_em_buffer_config_t *buffer_config_tbl)
uint32_t ti_em_atomic_processing_locality (void)
size_t ti_em_buffer_size (em_event_t event)
em_status_t ti_em_chain_rx_flow_open (int dma_idx, int flow_idx, int dst_queue_idx, int free_queue_idxSizeA, int sizeA, int free_queue_idxSizeB, int sizeB, int free_queue_idxSizeC, int sizeC, int free_queue_idxSizeD, int sizeD, int free_queue_idxOverflow, int error_handling)
void * ti_em_claim_local (void)
em_event_t ti_em_combine (ti_em_pair_t event_pair, ti_em_iterator_t *iterator_ptr)
em_status_t ti_em_counter_get (ti_em_counter_type_t type, ti_em_counter_value_t *value_ptr, int flag)
em_status_t ti_em_device_add_rio_route (ti_em_device_id_t device_idx, ti_em_device_rio_route_t device_route)
em_status_t ti_em_device_add_xge_route (ti_em_device_id_t device_idx, ti_em_device_xge_route_t device_route)
em_status_t ti_em_dispatch_once (void)
size_t ti_em_event_size (em_event_t event)
em_status_t ti_em_exit_global (void)
void ti_em_flush (em_event_t event)
em_event_t ti_em_free_with_buffers (em_event_t event, int32_t buffer_num, ti_em_buffer_config_t *buffer_config_tbl)
em_event_t ti_em_from_packet (ti_em_packet_t *packet_ptr)
ti_em_queue_id_t ti_em_get_absolute_queue_id (ti_em_dma_id_t dma_idx, ti_em_queue_id_t queue_idx)
ti_em_buf_mode_t ti_em_get_buf_mode (const em_event_t event)
ti_em_coh_mode_t ti_em_get_coh_mode (const em_event_t event)
size_t ti_em_get_eo_size_fast (void)
size_t ti_em_get_eo_size_slow (void)
size_t ti_em_get_event_group_size_fast (void)
size_t ti_em_get_event_group_size_slow (void)
size_t ti_em_get_pcb_size (void)
em_status_t ti_em_get_ps_words (em_event_t event, uint32_t *ps_word_ptr, size_t ps_wsize)
size_t ti_em_get_ps_wsize (em_event_t event)
size_t ti_em_get_queue_group_size_fast (void)
size_t ti_em_get_queue_group_size_slow (void)
size_t ti_em_get_queue_size_fast (void)
size_t ti_em_get_queue_size_slow (void)
size_t ti_em_get_tcb_size (void)
em_event_type_t ti_em_get_type (const em_event_t event)
em_event_type_t ti_em_get_type_preload (em_event_type_t type)
em_status_t ti_em_hw_queue_close (int queue_idx)
em_status_t ti_em_hw_queue_open (int queue_idx)
em_status_t ti_em_init_global (const ti_em_config_t *config_ptr)
em_status_t ti_em_init_local (void)
em_status_t ti_em_interrupt_disable (void)
em_status_t ti_em_interrupt_enable (void)
em_status_t ti_em_iterator_next (ti_em_iterator_t *iterator_ptr)
void * ti_em_iterator_pointer (ti_em_iterator_t *iterator_ptr)
em_status_t ti_em_iterator_previous (ti_em_iterator_t *iterator_ptr)
size_t ti_em_iterator_size (ti_em_iterator_t *iterator_ptr)
em_status_t ti_em_iterator_start (em_event_t event, ti_em_iterator_t *iterator_ptr)
em_status_t ti_em_iterator_stop (ti_em_iterator_t *iterator_ptr)
void ti_em_packet_restore_free_info (ti_em_packet_t *packet_ptr)
void ti_em_packet_set_buffer_info (ti_em_packet_t *packet_ptr, ti_em_buffer_config_t buffer_config)
void ti_em_packet_set_default (ti_em_packet_t *packet_ptr)
void ti_em_packet_set_event_group (ti_em_packet_t *packet_ptr, em_event_group_t event_group_hdl)
void ti_em_packet_set_pool_info (ti_em_packet_t *packet_ptr, ti_em_pool_config_t pool_config)
void ti_em_packet_set_queue (ti_em_packet_t *packet_ptr, em_queue_t queue_hdl)
void ti_em_packet_set_type (ti_em_packet_t *packet_ptr, em_event_type_t event_type)
void ti_em_preschedule (void)
em_status_t ti_em_process_add_route (ti_em_process_route_t process_route, ti_em_process_id_t process_idx)
em_queue_t ti_em_queue_create_hw (const char *name, ti_em_queue_id_t hw_queue_idx)
em_status_t ti_em_queue_create_hw_static (const char *name, ti_em_queue_id_t hw_queue_idx, em_queue_t queue)
ti_em_device_id_t ti_em_queue_get_device_id (em_queue_t queue_hdl)
ti_em_queue_mode_t ti_em_queue_get_mode (em_queue_t queue_hdl)
ti_em_process_id_t ti_em_queue_get_process_id (em_queue_t queue_hdl)
ti_em_queue_id_t ti_em_queue_get_queue_id (em_queue_t queue_hdl)
em_queue_t ti_em_queue_make_global (em_queue_t queue_hdl, ti_em_device_id_t device_idx, ti_em_process_id_t process_idx)
em_event_t ti_em_receive (em_queue_t queue)
em_status_t ti_em_register_trace_handler (ti_em_trace_handler_t handler)
em_status_t ti_em_rio_rx_flow_open (int dma_idx, int flow_idx, int dst_queue_idx, int free_queue_idxSizeA, int sizeA, int free_queue_idxSizeB, int sizeB, int free_queue_idxSizeC, int sizeC, int free_queue_idxSizeD, int sizeD, int free_queue_idxOverflow, int deviceIdx, int processIdx, int error_handling)
em_status_t ti_em_rio_tx_channel_open (int dma_idx)
ti_em_queue_id_t ti_em_rio_tx_queue_open (int rio_tx_queue_idx)
em_status_t ti_em_rx_channel_close (int dma_idx, int channel_idx)
em_status_t ti_em_rx_channel_open (int dma_idx, int channel_idx)
em_status_t ti_em_rx_flow_close (int dma_idx, int flow_idx)
em_status_t ti_em_rx_flow_open (int dma_idx, int flow_idx, int dst_queue_idx, int free_queue_idx, int error_handling)
em_status_t ti_em_set_ps_words (em_event_t event, uint32_t *ps_word_ptr, size_t ps_wsize)
em_status_t ti_em_set_ps_wsize (em_event_t event, size_t ps_wsize)
void ti_em_set_queue (em_event_t event, em_queue_t queue_hdl, em_event_group_t event_group_hdl)
void ti_em_set_type (em_event_t event, em_event_type_t event_type)
ti_em_pair_t ti_em_split (em_event_t event, ti_em_iterator_t *iterator_ptr)
void ti_em_tag_set_queue (uint32_t *tag_ptr, em_queue_t queue_hdl)
void ti_em_tag_set_type (uint32_t *tag_ptr, em_event_type_t event_type)
ti_em_packet_t * ti_em_to_packet (em_event_t event)
em_status_t ti_em_tx_channel_close (int dma_idx, int channel_idx)
em_status_t ti_em_tx_channel_open (int dma_idx, int channel_idx)
em_status_t ti_em_unregister_trace_handler (void)
em_status_t ti_em_xge_rx_channel_close (int dma_idx, int channel_idx)
em_status_t ti_em_xge_rx_channel_open (int dma_idx, int channel_idx)
em_status_t ti_em_xge_rx_flow_close (int dma_idx, int flow_idx)
em_status_t ti_em_xge_rx_flow_open (int dma_idx, int flow_idx, int dst_queue_idx, int free_queue_idx0, int free_queue_idx1, int error_handling)
em_status_t ti_em_xge_rx_miss_disable (ti_em_xge_rx_miss_type_t miss_type)
em_status_t ti_em_xge_rx_miss_enable (ti_em_xge_rx_miss_type_t miss_type)
em_status_t ti_em_xge_tx_channel_close (int dma_idx, int channel_idx)
em_status_t ti_em_xge_tx_channel_open (int dma_idx, int channel_idx)
ti_em_queue_id_t ti_em_xge_tx_queue_base_idx_get (void)
ti_em_queue_id_t ti_em_xge_tx_queue_open (ti_em_queue_id_t queue_base_idx, int vlan_priority)

Detailed Description

Implementation specific declarations.

Attention:
Unless otherwise specified, the values of symbolic constants cannot be changed.

Define Documentation

#define TI_EM_AP_PRIVATE_EVENT_NUM   (256u)

Maximum number of AP private tokens used by the EM

#define TI_EM_BUF_MODE_LOOSE   (1)

Buffer mode - Loose buffer

#define TI_EM_BUF_MODE_TIGHT   (0)

Buffer mode - Tight buffer

#define TI_EM_BUFFER_POOL_ID_NUM   (256u)

Maximum number of pool ids that can be set in the event descriptor pool index field (8 bits)

#define TI_EM_CD_PRIVATE_EVENT_NUM   (64u)

Maximum number of CD private tokens used by the EM

#define TI_EM_CHAIN_DISABLED   (0u)

Chaining disabled on Open-EM process.

#define TI_EM_CHAIN_ENABLED   (1u)

Chaining enabled on Open-EM process.

#define TI_EM_CHAIN_TX_QUEUE_NUM   (4u)

Maximum number of TX queues for chaining and poststoring. Must be a power of 2: 1, 2, 4 or 8.

#define TI_EM_CHAINING_PKTDMA   (0)

ID for the various chaining mechanisms (i.e. chaining modules)

#define TI_EM_COH_MODE_OFF   (0x0)

Cache coherency mode - Off

#define TI_EM_COH_MODE_ON   (0x1)

Cache coherency mode - On

#define TI_EM_COH_MODE_RESERVED0   (0x2)

Cache coherency mode - Reserved0

#define TI_EM_COH_MODE_RESERVED1   (0x3)

Cache coherency mode - Reserved1

#define TI_EM_CORE_NUM   (8u)

Maximum number of cores on a device on which the Event Machine can be executed.

#define TI_EM_DEVICE_NUM   (256)

Maximum number of devices on which the Open-EM can be executed.

#define TI_EM_DMA_QUEUE_NUM   (TI_EM_CORE_NUM + 2)

Total number of PKTDMA queues used by the EM

#define TI_EM_EO_NUM_MAX   (TI_EM_QUEUE_NUM_MAX)

Maximum number of execution objects.

#define TI_EM_EVENT_GROUP_NUM_MAX   (16384u)

Maximum number of event groups.

#define TI_EM_EVENT_TYPE_PRELOAD_MSK   (0xC0)

Event type preload mask.

#define TI_EM_EVENT_TYPE_PRELOAD_OFF   (0u)

Major event types (portable) : Data Preloading Off

Note:
Application should always ignore the actual values.
#define TI_EM_EVENT_TYPE_PRELOAD_ON_SIZE_A   (1u<<6)

Major event types (portable) : Data Preloading up to size A

Note:
Application should always ignore the actual values.
#define TI_EM_EVENT_TYPE_PRELOAD_ON_SIZE_B   (2u<<6)

Major event types (portable) : Data Preloading up to size B

Note:
Application should always ignore the actual values.
#define TI_EM_EVENT_TYPE_PRELOAD_ON_SIZE_C   (3u<<6)

Major event types (portable) : Data Preloading up to size C

Note:
Application should always ignore the actual values.
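As a hedged illustration, an application requests preloading by OR'ing one of these symbolic constants into the event type at allocation time, relying only on the symbolic names, never on the numeric values; the pool id and payload size below are placeholders:

    /* Sketch: request data preloading up to size A for a SW event. */
    size_t       data_size = 256;  /* placeholder payload size */
    em_pool_id_t pool_id   = 0;    /* placeholder pool id */
    em_event_type_t type  = EM_EVENT_TYPE_SW | TI_EM_EVENT_TYPE_PRELOAD_ON_SIZE_A;
    em_event_t      event = em_alloc(data_size, type, pool_id);

    /* The preload policy can be read back from the event type: */
    em_event_type_t preload = ti_em_get_type_preload(ti_em_get_type(event));
    if (preload == TI_EM_EVENT_TYPE_PRELOAD_ON_SIZE_A) {
        /* event data will be preloaded up to size A */
    }
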
#define TI_EM_HW_QUEUE_NUM

Total number of queues used by the EM

#define TI_EM_HW_QUEUE_STEP   (32u)

Alignment of QM queue base index

#define TI_EM_INTERRUPT_DISABLE   (0)

Enable/disable PDSP interrupt generation

#define TI_EM_ITERATOR_WSIZE   (8u)

Number of 32-bit words in a ti_em_iterator_t array.

#define TI_EM_PDSP_GLOBAL_DATA_SIZE   (8192u)

Multicore Navigator PDSP memory size to be allocated for private events and private firmware data.

#define TI_EM_PF_LEN   (1u)

Number of PF events per core.

#define TI_EM_POOL_NUM   (32u)

Maximum number of buffer pools that can be specified.

Note:
This value can be modified by an application, but the Event Machine library must then be recompiled.
#define TI_EM_PRELOAD_DISABLED   (0u)

Preload disabled.

#define TI_EM_PRELOAD_ENABLED   (1u)

Preload enabled.

#define TI_EM_PRIO_NUM   (EM_QUEUE_PRIO_HIGHEST + 1)

Number of queue priority levels (EM_QUEUE_PRIO_HIGHEST + 1).

#define TI_EM_PRIVATE_EVENT_DSC_SIZE   (16u)

Descriptor size for all the private tokens used by the EM

#define TI_EM_PROCESS_NUM   (2u)

Maximum number of Open-EM processes on a device on which the Event Machine can be executed.

#define TI_EM_PUSH_POLICY_HEAD   (0x1)

Push policy to head

#define TI_EM_PUSH_POLICY_TAIL   (0x0)

Push policy to tail

#define TI_EM_QUEUE_GROUP_NUM_MAX   (TI_EM_PRIO_NUM * TI_EM_CORE_NUM)

Maximum number of queue groups.

#define TI_EM_QUEUE_MODE_HW   (1u)

Queue associated with a hardware (HW) queue.

#define TI_EM_QUEUE_MODE_SD   (0u)

Queue associated with a scheduling (SD) queue.

#define TI_EM_QUEUE_NUM_IN_SET   (8u)

Maximum number of queues in a set. TI_EM_QUEUE_SET_NUM * TI_EM_QUEUE_NUM_IN_SET <= TI_EM_QUEUE_NUM_MAX.

#define TI_EM_QUEUE_NUM_MAX   (16384u)

Maximum number of queues.

#define TI_EM_QUEUE_SET_NUM   (256u)

Maximum number of queue sets. Must be a power of 2.

#define TI_EM_SCHEDULER_THREAD_NUM   (4u)

Maximum number of Navigator PDSP instances on which this process scheduler can be executed. Only relevant on a Keystone II target.

#define TI_EM_SLOT_SPCB   (0)

Definitions of the various PDSP communication memory slots

#define TI_EM_STATIC_QUEUE_NUM_IN_SET   (2u)

Number of static queues in a set. TI_EM_QUEUE_SET_NUM*TI_EM_STATIC_QUEUE_NUM_IN_SET <= TI_EM_STATIC_QUEUE_NUM_MAX

#define TI_EM_STATIC_QUEUE_NUM_MAX   (256u)

Maximum number of static queues. TI_EM_STATIC_QUEUE_NUM_MAX <= TI_EM_QUEUE_NUM_MAX.

#define TI_EM_STREAM_NUM   (256u)

Maximum number of XGE streams.

#define TI_EM_TSCOPE_ALLOC   (1)

Trace scope for the em_alloc API.

#define TI_EM_TSCOPE_ATOMIC_PROCESSING_END   (6)

Trace scope for the em_atomic_processing_end API.

#define TI_EM_TSCOPE_CLAIM_LOCAL   (5)

Trace scope for the ti_em_claim_local API.

#define TI_EM_TSCOPE_DISPATCH   (3)

Trace scope for the ti_em_dispatch_once API.

#define TI_EM_TSCOPE_FREE   (7)

Trace scope for the em_free API.

#define TI_EM_TSCOPE_PRESCHEDULE   (4)

Trace scope for the ti_em_preschedule API.

#define TI_EM_TSCOPE_SEND   (2)

Trace scope for the em_send API.

#define TI_EM_XGE_CHAIN_HEADER_SIZE   (8u)

Size (in bytes) of the XGE chain header.

#define TI_EM_XGE_CRC_SIZE   (4)

Size (in bytes) of the CRC added by XGE.

#define TI_EM_XGE_ENET_HEADER_SIZE   (18u)

Size (in bytes) of the XGE Ethernet header.

#define TI_EM_XGE_FRAME_SIZE_MIN   (64u)

Minimal size (in bytes) of the XGE frame.

#define TI_EM_XGE_HEADER_SIZE   (TI_EM_XGE_ENET_HEADER_SIZE + TI_EM_XGE_CHAIN_HEADER_SIZE)

Size (in bytes) of the XGE header (18+8=26).

#define TI_EM_XGE_PAYLOAD_SIZE_MIN   (TI_EM_XGE_FRAME_SIZE_MIN - TI_EM_XGE_HEADER_SIZE - TI_EM_XGE_CRC_SIZE )

Minimum size (in bytes) of the XGE payload (64 - 26 - 4 = 34).

#define TI_EM_XGE_RX_FRAGMENT_SIZE_MIN   (TI_EM_XGE_PAYLOAD_SIZE_MIN + TI_EM_XGE_CRC_SIZE)

Minimal size (in bytes) to be allocated for the XGE RX fragment buffer (34 + 4 = 38).

#define TI_EM_XGE_RX_HEADER_SIZE   (TI_EM_XGE_HEADER_SIZE)

Size (in bytes) to be allocated for the XGE RX header buffer (26).

#define TI_EM_XGE_RX_MISS_QUEUE_NUM   (TI_EM_XGE_VLAN_PRIO_NUM)

Number of contiguous RX miss queues that need to be reserved for chaining over XGE

#define TI_EM_XGE_TX_DIVERT_QUEUE_NUM   (32u)

Number of contiguous TX divert queues that need to be reserved for chaining over XGE

#define TI_EM_XGE_TX_FRAGMENT_SIZE   0

Size (in bytes) to be allocated for the XGE TX fragment buffer

#define TI_EM_XGE_TX_HEADER_SIZE   (TI_EM_XGE_HEADER_SIZE + TI_EM_XGE_PAYLOAD_SIZE_MIN)

Size (in bytes) to be allocated for the XGE TX header buffer (26 + 34 = 60).

#define TI_EM_XGE_TX_QUEUE_NUM   (1u)

Maximum number of contiguous XGE TX queues

#define TI_EM_XGE_VLAN_PRIO_NUM   (8u)

Number of VLAN priorities that need to be reserved for chaining over XGE


Typedef Documentation

typedef uint32_t ti_em_buf_mode_t

Identifies the event buffer mode.

typedef uint32_t ti_em_coh_mode_t

Cache Coherency mode. Identifies the data buffer cache coherency mode.

typedef struct ti_em_config_t_ ti_em_config_t

The structure describes the hardware configuration passed to ti_em_init_global().

typedef enum ti_em_counter_type_e ti_em_counter_type_e

The enumeration describes the counters addressed by the ti_em_counter_get() API.

typedef uint32_t ti_em_counter_type_t

Counter type. Identifies the counter type.

typedef uint32_t ti_em_counter_value_t

Counter value. Identifies the counter value.

typedef uint32_t ti_em_destination_id_t

Destination id. Identifies the destination for a route. It may be a ti_em_process_id_t or a ti_em_device_id_t.

typedef uint8_t ti_em_device_id_t

Device id. Identifies the device.

typedef uint32_t ti_em_dma_id_t

DMA id. Identifies the DMA.

typedef uint32_t ti_em_flow_id_t

Flow id. Identifies the flow.

typedef void(* ti_em_free_func_t )(void *buffer_ptr, size_t buffer_size)

Free function handler.

Parameters:
buffer_ptr  Buffer pointer
buffer_size  Buffer size
typedef uint8_t ti_em_interrupt_id_t

Identifies the Interrupt index.

typedef uint32_t ti_em_packet_t

Packet. Identifies the packet of the event.

typedef uint8_t ti_em_pdsp_id_t

PDSP id. Identifies the PDSP.

typedef uint8_t ti_em_process_id_t

Open-EM process id. Identifies the Open-EM process.

typedef uint32_t ti_em_process_type_t

Open-EM process type. Identifies the Open-EM process type.

typedef uint32_t ti_em_push_policy_t

Identifies the push policy.

typedef uint16_t ti_em_queue_id_t

Event queue id. Identifies the event queue.

typedef uint8_t ti_em_queue_mode_t

Identifies the Queue Mode.

typedef uint32_t ti_em_sem_id_t

Hardware semaphore id. Identifies the hardware semaphore.

typedef uint32_t ti_em_stream_id_t

Stream id. Identifies the stream.

typedef em_status_t(* ti_em_trace_handler_t )(ti_em_tscope_t tscope,...)

Trace function handler.

Parameters:
tscope  Trace scope. Identifies the scope for interpreting the trace code and variable arguments
args  Variable number and type of arguments
Returns:
The function may not return, depending on implementation.
See also:
ti_em_register_trace_handler(), ti_em_unregister_trace_handler()
typedef uint32_t ti_em_tscope_t

Trace scope. Identifies the scope for interpreting trace codes and variable arguments

typedef enum ti_em_xge_rx_miss_type_e ti_em_xge_rx_miss_type_e

The enumeration describes the XGE chain header miss types checked by the XGE router.

typedef uint32_t ti_em_xge_rx_miss_type_t

RX miss type. Identifies the RX miss type associated with chaining over XGE.


Enumeration Type Documentation

enum ti_em_counter_type_e

The enumeration describes the counters addressed by the ti_em_counter_get() API.

Enumerator:
ti_em_counter_type_XGE_RECEIVE 

XGE router counter counting the received fragments.

ti_em_counter_type_XGE_WRONG_ETHER_TYPE 

XGE router counter counting the received fragments with wrong ether type.

ti_em_counter_type_XGE_WRONG_MAC 

XGE router counter counting the received fragments with wrong MAC address.

ti_em_counter_type_XGE_WRONG_PROCESS 

XGE router counter counting the received fragments with wrong process index.

ti_em_counter_type_XGE_WRONG_SEQUENCE 

XGE router counter counting the received fragments with wrong sequence index.

ti_em_counter_type_XGE_WRONG_SERVICE_TYPE 

XGE router counter counting the received fragments with wrong service type.

enum ti_em_xge_rx_miss_type_e

The enumeration describes the XGE chain header miss types checked by the XGE router.

Enumerator:
ti_em_xge_rx_miss_type_ETHER_TYPE 

XGE chain header ETHER_TYPE check.

ti_em_xge_rx_miss_type_SERVICE_TYPE 

XGE chain header SERVICE_TYPE check.


Function Documentation

em_event_t ti_em_alloc_local ( size_t  size,
em_event_type_t  type 
)

Allocate a local event (for post-storing feature).

Memory address of the allocated event is system specific and can depend on given pool id, event size and type. Returned event (handle) may refer to a memory buffer or a HW specific descriptor, i.e. the event structure is system specific.

Use em_event_pointer() to convert an event (handle) to a pointer to the event structure.

EM_EVENT_TYPE_SW with minor type 0 is reserved for direct portability. It is always guaranteed to return a 64-bit aligned contiguous data buffer, that can directly be used by the application up to the given size (no HW specific descriptors etc are visible).

Parameters:
size  Event size in octets
type  Event type to allocate
Returns:
the allocated event or EM_EVENT_UNDEF on an error
See also:
em_free(), em_send(), em_event_pointer(), em_receive_func_t()
em_event_t ti_em_alloc_with_buffers ( em_event_type_t  event_type,
em_pool_id_t  pool_id,
int32_t  buffer_num,
ti_em_buffer_config_t buffer_config_tbl 
)

Allocates an event while providing buffer(s) to attach.

It allocates a primary event from pools with zero-buffer descriptors.
It attaches one or more buffers to the event.
The event comes with optional free function and coherency mode for each buffer.

Parameters:
event_type  Event type to allocate
pool_id  Event pool id
buffer_num  Number of event buffers
buffer_config_tbl  Data buffer configuration table
Returns:
Event handle
See also:
ti_em_free_with_buffers()
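A minimal sketch of the alloc/free pairing; the contents of each ti_em_buffer_config_t entry (buffer pointer, size, optional free function, coherency mode) are assumed to be filled in by the application according to ti_em_buffer_config_t_:

    ti_em_buffer_config_t buffer_config_tbl[2];
    /* fill buffer_config_tbl[0..1] with buffer pointer, size, optional
       free function and coherency mode for each attached buffer */
    em_pool_id_t pool_id = 0;  /* placeholder pool id */
    em_event_t event = ti_em_alloc_with_buffers(EM_EVENT_TYPE_SW, pool_id,
                                                2, buffer_config_tbl);

    /* ... use the event ... */

    ti_em_buffer_config_t out_tbl[2];
    ti_em_free_with_buffers(event, 2, out_tbl);  /* out_tbl reports the buffers back */
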
uint32_t ti_em_atomic_processing_locality ( void  )

Indicates whether atomic processing locality has been achieved by the scheduler.

Returns:
Hint if the atomic processing locality has been achieved.
size_t ti_em_buffer_size ( em_event_t  event)

Returns the buffer size.

Returns the size of the event attached data buffer.

Parameters:
event  Event handle.
Returns:
Size of the attached buffer.
em_status_t ti_em_chain_rx_flow_open ( int  dma_idx,
int  flow_idx,
int  dst_queue_idx,
int  free_queue_idxSizeA,
int  sizeA,
int  free_queue_idxSizeB,
int  sizeB,
int  free_queue_idxSizeC,
int  sizeC,
int  free_queue_idxSizeD,
int  sizeD,
int  free_queue_idxOverflow,
int  error_handling 
)

Opens a hardware process chaining receive flow in the database and configures the hardware registers of this receive flow.

Precondition:
The hardware receive flow shall not be used by another part of the application on the device.
If the hardware receive flow is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_configureRxFlow PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
flow_idx  Index of the receive flow to be opened. It ranges from 0 to the maximum Rx flow index supported by the Multicore Navigator instance.
dst_queue_idx  Index of the destination queue.
free_queue_idxSizeA  Index of the destination free queue for packet lengths smaller than sizeA. Queue indexes range from 0 to the maximum queue index supported by the Multicore Navigator instance.
sizeA  Maximum size in bytes of the packet length for free queue A
free_queue_idxSizeB  Index of the destination free queue for packet lengths smaller than sizeB.
sizeB  Maximum size in bytes of the packet length for free queue B
free_queue_idxSizeC  Index of the destination free queue for packet lengths smaller than sizeC.
sizeC  Maximum size in bytes of the packet length for free queue C
free_queue_idxSizeD  Index of the destination free queue for packet lengths smaller than sizeD.
sizeD  Maximum size in bytes of the packet length for free queue D
free_queue_idxOverflow  Index of the destination free queue for packet lengths that overflow the above sizes.
error_handling  Receive flow error handling mode when starvation occurs.
0 = Starvation errors result in dropping the packet.
1 = Starvation errors result in a subsequent retry.
Postcondition:
On success, the RX_FLOW_CONFIG registers shall be correctly configured.
Returns:
status, EM_OK on success
void* ti_em_claim_local ( void  )

The function returns the local event buffer pointer for a local event or NULL if there is no local event.

Returns:
Pointer to the event buffer.
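A short sketch of polling for a local (post-stored) event:

    void *buffer_ptr = ti_em_claim_local();
    if (buffer_ptr != NULL) {
        /* a local event is pending; process its buffer */
    }
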
em_event_t ti_em_combine ( ti_em_pair_t  event_pair,
ti_em_iterator_t iterator_ptr 
)

Combines two event handles into one scattered event handle.
When returned, the iterator buffer points to the last descriptor of the old pair.head_event.

Parameters:
event_pair  Pair to combine.
iterator_ptr  Pointer to iterator.
Returns:
Event handle.
em_status_t ti_em_counter_get ( ti_em_counter_type_t  type,
ti_em_counter_value_t value_ptr,
int  flag 
)

Returns the value of the counter specified by the "type" parameter. If the "flag" parameter is set to 1, the counter is read and then reset to 0 in the API call. The returned counter value is written at the "value_ptr" address.

Parameters:
type  Counter type.
value_ptr  Pointer to the variable where the counter value is written.
flag  Reset flag; if set to 1, the counter is read and then reset to 0.
Returns:
status, EM_OK on success
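For illustration, reading the XGE receive counter and resetting it in the same call (flag set to 1):

    ti_em_counter_value_t value;
    em_status_t status = ti_em_counter_get(ti_em_counter_type_XGE_RECEIVE,
                                           &value, 1 /* read and reset */);
    if (status == EM_OK) {
        /* value holds the fragments received since the last reset */
    }
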
em_status_t ti_em_device_add_rio_route ( ti_em_device_id_t  device_idx,
ti_em_device_rio_route_t  device_route 
)

Adds an SRIO route to the device router.

Parameters:
device_idx  Device index.
device_route  Device route.
Returns:
status, EM_OK on success
em_status_t ti_em_device_add_xge_route ( ti_em_device_id_t  device_idx,
ti_em_device_xge_route_t  device_route 
)

Adds an XGE route to the device router.

Parameters:
device_idx  Device index.
device_route  Device route.
Returns:
status, EM_OK on success
em_status_t ti_em_dispatch_once ( void  )

The function runs the dispatcher once.

The dispatcher code is responsible for calling an execution object's receive function. It checks once whether an event has been scheduled to its core and, if so, calls the receive function.

Returns:
EM_ERR_NOT_FOUND if no receive function was executed; EM_OK when a public event was executed.
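A minimal polling dispatch loop, as a sketch; calling ti_em_preschedule() before each dispatch is one possible placement, an application may instead call it from within a receive function:

    for (;;) {
        ti_em_preschedule();                 /* allow pre-scheduling to this core */
        if (ti_em_dispatch_once() == EM_OK) {
            /* a receive function was executed */
        }
        /* EM_ERR_NOT_FOUND: nothing was scheduled; poll again */
    }
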
size_t ti_em_event_size ( em_event_t  event)

Returns the event payload size.

Returns the size of the event attached data buffers.

Parameters:
event  Event handle.
Returns:
Size of the attached buffers.
em_status_t ti_em_exit_global ( void  )

Global shutdown of EM internals.

Only one core does this, and after this call no other call is allowed.

Returns:
status, EM_OK on success
void ti_em_flush ( em_event_t  event)

Loops over all data buffers of the event and performs a cache write-back invalidate if the data buffer is dirty.

Parameters:
event  Event handle.
em_event_t ti_em_free_with_buffers ( em_event_t  event,
int32_t  buffer_num,
ti_em_buffer_config_t buffer_config_tbl 
)

Frees an event with buffers.

It returns an event to its free pool(s).
It doesn't call free function, even if provided.
It fills out the configuration for each buffer that has been attached with ti_em_alloc_with_buffers().
It returns the number of such buffers; it is an error if there are more buffer pointers to return than buffer_num.

Parameters:
event  Event to be freed
buffer_num  Number of event buffers
buffer_config_tbl  Data buffer configuration table
Returns:
Event handle.
See also:
ti_em_alloc_with_buffers()
em_event_t ti_em_from_packet ( ti_em_packet_t packet_ptr)

Helper function that converts a packet into an event handle.

Parameters:
packet_ptr  Pointer to the packet.
Returns:
Event handle.
ti_em_queue_id_t ti_em_get_absolute_queue_id ( ti_em_dma_id_t  dma_idx,
ti_em_queue_id_t  queue_idx 
)

Retrieves the absolute queue index from the associated Multicore Navigator PktDMA engine index and the relative TX queue index.

Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the CPPI LLD.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
queue_idx  Relative index of the queue.
Returns:
Absolute index of the queue.
ti_em_buf_mode_t ti_em_get_buf_mode ( const em_event_t  event)

Helper function which returns the buffer mode.

Parameters:
event  Event handle.
Returns:
Buffer mode of the event.
ti_em_coh_mode_t ti_em_get_coh_mode ( const em_event_t  event)

Helper function which returns the cache coherency mode.

Parameters:
event  Event handle.
Returns:
Cache coherency mode of the event.
size_t ti_em_get_eo_size_fast ( void  )

The function returns the needed size in bytes for the fast part of the EO descriptor. It is stored in the .tiEmGlobalFast memory section, preferably located in non-cached MSMC RAM or DDR3 RAM.

Returns:
Size of the fast part of an EO structure
size_t ti_em_get_eo_size_slow ( void  )

The function returns the needed size in bytes for the slow part of the EO descriptor. It is stored in the .tiEmGlobalSlow memory section, preferably located in non-cached MSMC RAM or DDR3 RAM.

Returns:
Size of the slow part of an EO structure
size_t ti_em_get_event_group_size_fast ( void  )

The function returns the needed size in bytes for the fast part of the event group descriptor. It is stored in the .tiEmGlobalFast memory section, preferably located in non-cached MSMC RAM or DDR3 RAM.

Returns:
Size of the fast part of an event group structure
size_t ti_em_get_event_group_size_slow ( void  )

The function returns the needed size in bytes for the slow part of the event group descriptor. It is stored in the .tiEmGlobalSlow memory section, preferably located in non-cached MSMC RAM or DDR3 RAM.

Returns:
Size of the slow part of an event group structure
size_t ti_em_get_pcb_size ( void  )

The function returns the needed size in bytes for the master control block plus the runtime master control block. It is stored in the .tiEmGlobalFast memory section.

Returns:
Size of the Master Control Block structure
em_status_t ti_em_get_ps_words ( em_event_t  event,
uint32_t *  ps_word_ptr,
size_t  ps_wsize 
)

Helper function which gets the ps words from the event.

Parameters:
event  Event handle.
ps_word_ptr  Pointer to ps words.
ps_wsize  Number of ps words.
Returns:
status, EM_OK on success
size_t ti_em_get_ps_wsize ( em_event_t  event)

Helper function which gets the number of ps words from the event.

Parameters:
event  Event handle.
Returns:
Number of ps words
size_t ti_em_get_queue_group_size_fast ( void  )

The function returns the needed size in bytes for the fast part of the queue group descriptor. It is stored in the .tiEmGlobalFast memory section, preferably located in non-cached MSMC RAM or DDR3 RAM.

Returns:
Size of the fast part of a queue group structure
size_t ti_em_get_queue_group_size_slow ( void  )

The function returns the needed size in bytes for the slow part of the queue group descriptor. It is stored in the .tiEmGlobalSlow memory section, preferably located in non-cached MSMC RAM or DDR3 RAM.

Returns:
Size of the slow part of a queue group structure
size_t ti_em_get_queue_size_fast ( void  )

The function returns the needed size in bytes for the fast part of the queue descriptor. It is stored in the .tiEmGlobalFast memory section, preferably located in non-cached MSMC RAM or DDR3 RAM.

Returns:
Size of the fast part of a queue structure
size_t ti_em_get_queue_size_slow ( void  )

The function returns the needed size in bytes for the slow part of the queue descriptor. It is stored in the .tiEmGlobalSlow memory section, preferably located in non-cached MSMC RAM or DDR3 RAM.

Returns:
Size of the slow part of a queue structure
size_t ti_em_get_tcb_size ( void  )

The function returns the needed size in bytes for the dispatcher control block. It is stored in the .tiEmLocal memory section.

Returns:
Size of the Dispatcher Control Block structure
em_event_type_t ti_em_get_type ( const em_event_t  event)

Helper function which returns the event type.

Parameters:
event  Event handle.
Returns:
Event type of the descriptor.
em_event_type_t ti_em_get_type_preload ( em_event_type_t  type)

Helper function which extracts the preload policy from the event type.

Parameters:
type  Event type.
Returns:
Preload policy.
em_status_t ti_em_hw_queue_close ( int  queue_idx)

Closes a hardware queue in the database.

Precondition:
The hardware queue shall already be used by the Open-EM process.
If the hardware queue is not used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Qmss_queueClose PDK function.
Parameters:
queue_idx  Index of the queue to be closed. It ranges from 0 to the maximum queue index supported by the Multicore Navigator.
Postcondition:
None.
Returns:
status, EM_OK on success.
em_status_t ti_em_hw_queue_open ( int  queue_idx)

Opens a hardware queue in the database.

Precondition:
The hardware queue shall not be used by another part of the application on the device.
If the hardware queue is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Qmss_queueOpen PDK function.
Parameters:
queue_idx  Index of the queue to be opened. It ranges from 0 to the maximum queue index supported by the Multicore Navigator.
Postcondition:
None.
Returns:
status, EM_OK on success.
em_status_t ti_em_init_global ( const ti_em_config_t config_ptr)

Global initialization of EM internals.

Only one core does this, and it must be called before any other call.

Parameters:
config_ptr  Pointer to the hardware configuration structure.
Returns:
status, EM_OK on success
em_status_t ti_em_init_local ( void  )

Local initialization of EM internals.

All cores call this and it must be called after ti_em_init_global(), but before any other call. The implementation may actually be empty, but it might be needed later for core-specific initializations, so application startup should always call it.

Returns:
status, EM_OK on success
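A hedged startup sketch combining the two phases; the configuration fields are defined by ti_em_config_t_ and are not repeated here:

    ti_em_config_t config;
    /* fill config according to ti_em_config_t_ */

    /* exactly one core performs the global initialization: */
    ti_em_init_global(&config);

    /* then every participating core performs its local initialization: */
    ti_em_init_local();
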
em_status_t ti_em_interrupt_disable ( void  )

Disables the interrupt generation for the attached dispatcher.

Returns:
status, EM_OK on success
em_status_t ti_em_interrupt_enable ( void  )

Enables the interrupt generation for the attached dispatcher. The interrupt channel index used is based on the hw_queue_base_idx configuration and the physical core index. If the interrupt channel index is less than 32, it is mapped to one of the 32 HI channels in the associated INTD register; otherwise it is mapped to one of the 16 LOW channels. By default, interrupts are not enabled.

Returns:
status, EM_OK on success
em_status_t ti_em_iterator_next ( ti_em_iterator_t iterator_ptr)

Moves the iterator to the next buffer.

Parameters:
iterator_ptr  Pointer to iterator.
Returns:
EM_OK when successful, EM_ERR_BAD_STATE when the iterator is in a wrong state, EM_ERR_NOT_FOUND when the iterator points to the last buffer.
void* ti_em_iterator_pointer ( ti_em_iterator_t iterator_ptr)

Returns a pointer to the iterator attached data buffer.

Parameters:
iterator_ptr  Pointer to iterator.
Returns:
Pointer to the buffer.
em_status_t ti_em_iterator_previous ( ti_em_iterator_t iterator_ptr)

Moves the iterator to the previous buffer.

Parameters:
iterator_ptr  Pointer to iterator.
Returns:
EM_OK when successful, EM_ERR_BAD_STATE when the iterator is in a wrong state, EM_ERR_NOT_FOUND when the iterator points to the first buffer.
size_t ti_em_iterator_size ( ti_em_iterator_t iterator_ptr)

Returns the size of the iterator attached data buffer.

Parameters:
iterator_ptr  Pointer to iterator.
Returns:
Size of the attached buffer.
em_status_t ti_em_iterator_start ( em_event_t  event,
ti_em_iterator_t iterator_ptr 
)

Initializes the iterator to the first buffer. Two iterators may not be started at the same time for a single event.

Parameters:
event  Event handle to start.
iterator_ptr  Pointer to iterator.
Returns:
returns EM_OK.
em_status_t ti_em_iterator_stop ( ti_em_iterator_t iterator_ptr)

Stops the iterator and registers all the touched buffers.
It is mandatory to call ti_em_iterator_stop() before em_free(), em_send() or em_event_group_apply().
It is forbidden to use the iterator or any iterator content from this point onwards.

Parameters:
iterator_ptr  Pointer to iterator.
Returns:
EM_OK when successful, EM_ERR_BAD_STATE when the iterator is in a wrong state.
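Putting the iterator calls together, a sketch that walks every buffer of a scattered event (event is assumed to be a valid event handle):

    ti_em_iterator_t iterator;
    if (ti_em_iterator_start(event, &iterator) == EM_OK) {
        do {
            void  *buf  = ti_em_iterator_pointer(&iterator);
            size_t size = ti_em_iterator_size(&iterator);
            /* process buf[0 .. size) */
        } while (ti_em_iterator_next(&iterator) == EM_OK); /* EM_ERR_NOT_FOUND at the last buffer */
        ti_em_iterator_stop(&iterator); /* mandatory before em_free()/em_send() */
    }
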
void ti_em_packet_restore_free_info ( ti_em_packet_t packet_ptr)

Helper function which restores the free information in the packet.

Parameters:
packet_ptr  Pointer to the packet.
void ti_em_packet_set_buffer_info ( ti_em_packet_t packet_ptr,
ti_em_buffer_config_t  buffer_config 
)

Helper function which sets the buffer information in the packet.

Parameters:
packet_ptr  Pointer to the packet.
buffer_config  Structure containing the buffer info.
void ti_em_packet_set_default ( ti_em_packet_t packet_ptr)

Helper function which sets the first 12 words of an event to their default values.

The CPPI type is set to Host, the EPIB flag is ON, and the PS location flag is ON.

Parameters:
packet_ptr  Pointer to the packet.
void ti_em_packet_set_event_group ( ti_em_packet_t packet_ptr,
em_event_group_t  event_group_hdl 
)

Helper function which sets the event group information in the packet.

Parameters:
packet_ptr  Pointer to the packet.
event_group_hdl  Event group handle to set.
void ti_em_packet_set_pool_info ( ti_em_packet_t packet_ptr,
ti_em_pool_config_t  pool_config 
)

Helper function which sets the event pool information in the packet.

Parameters:
packet_ptr  Pointer to the packet.
pool_config  Structure containing the pool info.
void ti_em_packet_set_queue ( ti_em_packet_t packet_ptr,
em_queue_t  queue_hdl 
)

Helper function which sets the queue information in the packet.

Parameters:
packet_ptr  Pointer to the packet.
queue_hdl  Queue handle to set.
void ti_em_packet_set_type ( ti_em_packet_t packet_ptr,
em_event_type_t  event_type 
)

Helper function which sets the event type in the packet.

Parameters:
packet_ptr  Pointer to the packet.
event_type  Event type of the packet.
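The packet helpers are typically used together when building an event descriptor by hand; a sketch (event and queue_hdl are placeholders):

    ti_em_packet_t *packet_ptr = ti_em_to_packet(event);
    ti_em_packet_set_default(packet_ptr);             /* Host type, EPIB on, PS location on */
    ti_em_packet_set_type(packet_ptr, EM_EVENT_TYPE_SW);
    ti_em_packet_set_queue(packet_ptr, queue_hdl);    /* destination queue */
    event = ti_em_from_packet(packet_ptr);
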
void ti_em_preschedule ( void  )

Indicates that the scheduler can pre-schedule events to this core.

em_status_t ti_em_process_add_route ( ti_em_process_route_t  process_route,
ti_em_process_id_t  process_idx 
)

Adds a process route to the device router.

Parameters:
process_route  Parameters of the process route.
process_idx  Process index.
Returns:
status, EM_OK on success
em_queue_t ti_em_queue_create_hw ( const char *  name,
ti_em_queue_id_t  hw_queue_idx 
)

Creates a HW queue.

Parameters:
name  Queue name for debugging purposes (optional, NULL ok)
hw_queue_idx  HW queue index
Returns:
new queue id or EM_QUEUE_UNDEF on an error
em_status_t ti_em_queue_create_hw_static ( const char *  name,
ti_em_queue_id_t  hw_queue_idx,
em_queue_t  queue 
)

Creates a static HW queue.

Parameters:
name  Queue name for debugging purposes (optional, NULL ok)
hw_queue_idx  HW queue index
queue  Requested queue id from the static range
Returns:
EM_OK on success or EM_QUEUE_UNDEF on an error
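For illustration (hw_queue_idx is an application-chosen hardware queue index):

    ti_em_queue_id_t hw_queue_idx = 0;  /* placeholder */
    em_queue_t queue = ti_em_queue_create_hw("myHwQueue", hw_queue_idx);
    if (queue == EM_QUEUE_UNDEF) {
        /* creation failed */
    }
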
ti_em_device_id_t ti_em_queue_get_device_id ( em_queue_t  queue_hdl)

Helper function which extracts the device index from the queue handle.

Parameters:
queue_hdl  Queue handle.
Returns:
Device index.
ti_em_queue_mode_t ti_em_queue_get_mode ( em_queue_t  queue_hdl)

Helper function which extracts the queue mode (HW or SD) from the queue handle.

Parameters:
queue_hdl  Queue handle.
Returns:
Queue mode.
ti_em_process_id_t ti_em_queue_get_process_id ( em_queue_t  queue_hdl)

Helper function which extracts the process index from the queue handle.

Parameters:
queue_hdl  Queue handle.
Returns:
Process index.
ti_em_queue_id_t ti_em_queue_get_queue_id ( em_queue_t  queue_hdl)

Helper function which returns the index of the associated Multicore Navigator queue.

Parameters:
queue_hdl  Queue handle.
Returns:
Queue index.
em_queue_t ti_em_queue_make_global ( em_queue_t  queue_hdl,
ti_em_device_id_t  device_idx,
ti_em_process_id_t  process_idx 
)

Helper function which generates a global queue handle.

Parameters:
queue_hdl  Local queue handle.
device_idx  Device index.
process_idx  Process index.
Returns:
Global queue handle.
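A sketch showing a local queue handle being widened with routing information, and the matching getters recovering it (all inputs are placeholders):

    em_queue_t         local_queue = EM_QUEUE_UNDEF; /* placeholder: a queue created earlier */
    ti_em_device_id_t  device_idx  = 0;              /* placeholder */
    ti_em_process_id_t process_idx = 0;              /* placeholder */

    em_queue_t global_queue =
        ti_em_queue_make_global(local_queue, device_idx, process_idx);

    ti_em_device_id_t  dev  = ti_em_queue_get_device_id(global_queue);
    ti_em_process_id_t proc = ti_em_queue_get_process_id(global_queue);
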
em_event_t ti_em_receive ( em_queue_t  queue)

Receives an event from a queue.

Event must have been allocated with em_alloc().

Parameters:
queue  Receiving queue
Returns:
the received event or EM_EVENT_UNDEF if the queue is empty.
See also:
em_alloc()
em_status_t ti_em_register_trace_handler ( ti_em_trace_handler_t  handler)

Registers the trace handler.

Parameters:
handler  Trace handler.
Returns:
Success code.
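A hedged sketch of a user trace handler; per the ti_em_trace_handler_t typedef, the handler receives the trace scope first and scope-specific variable arguments after it:

    #include <stdarg.h>

    static em_status_t my_trace_handler(ti_em_tscope_t tscope, ...)
    {
        va_list args;
        va_start(args, tscope);
        /* interpret args according to tscope, e.g. TI_EM_TSCOPE_SEND */
        va_end(args);
        return EM_OK;
    }

    /* registration, typically at startup: */
    ti_em_register_trace_handler(my_trace_handler);
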
em_status_t ti_em_rio_rx_flow_open ( int  dma_idx,
int  flow_idx,
int  dst_queue_idx,
int  free_queue_idxSizeA,
int  sizeA,
int  free_queue_idxSizeB,
int  sizeB,
int  free_queue_idxSizeC,
int  sizeC,
int  free_queue_idxSizeD,
int  sizeD,
int  free_queue_idxOverflow,
int  deviceIdx,
int  processIdx,
int  error_handling 
)

Opens a hardware process RIO receive flow in the database and configures the hardware registers of this receive flow. It also creates the mapping between the Type9 message parameters and the opened flow.

Precondition:
The hardware receive flow shall not be used by another part of the application on the device.
If the hardware receive flow is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_configureRxFlow PDK function and the CSL_SRIO_MapType9MessageToQueue CSL API.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
flow_idx  Index of the receive flow to be opened. It ranges from 0 to the maximum Rx flow index supported by the Multicore Navigator instance.
dst_queue_idx  Index of the destination queue.
free_queue_idxSizeA  Index of the destination free queue for packet lengths smaller than sizeA. Queue indexes range from 0 to the maximum queue index supported by the Multicore Navigator instance.
sizeA  Maximum size in bytes of the packet length for free queue A
free_queue_idxSizeB  Index of the destination free queue for packet lengths smaller than sizeB.
sizeB  Maximum size in bytes of the packet length for free queue B
free_queue_idxSizeC  Index of the destination free queue for packet lengths smaller than sizeC.
sizeC  Maximum size in bytes of the packet length for free queue C
free_queue_idxSizeD  Index of the destination free queue for packet lengths smaller than sizeD.
sizeD  Maximum size in bytes of the packet length for free queue D
free_queue_idxOverflow  Index of the destination free queue for packet lengths that overflow the above sizes.
deviceIdx  Index of the current device
processIdx  Index of the current process
error_handling  Receive flow error handling mode when starvation occurs.
0 = Starvation errors result in dropping the packet.
1 = Starvation errors result in a subsequent retry.
Postcondition:
On success, the RX_FLOW_CONFIG registers shall be correctly configured.
Returns:
status, EM_OK on success
em_status_t ti_em_rio_tx_channel_open ( int  dma_idx)

Opens a hardware SRIO transmit channel in the database and configures the hardware registers of this transmit channel.

Precondition:
The hardware transmit channel shall not be used by another part of the application on the device.
If the hardware transmit channel is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_txChannelOpen PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
Postcondition:
On success, the TX_CHANNEL_GLOBAL_CONFIG_REG_A register shall be enabled.
The TX_CHANNEL_SCHEDULER_CONFIG_REG_PRIORITY register shall be configured to the desired value.
Returns:
status, EM_OK on success
ti_em_queue_id_t ti_em_rio_tx_queue_open ( int  rio_tx_queue_idx)

Opens a hardware SRIO transmit queue in the database and configures the hardware registers of this transmit queue.

Precondition:
The hardware transmit queue shall not be used by another part of the application on the device.
If the hardware transmit queue is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the ti_em_hw_queue_open function.
Parameters:
rio_tx_queue_idx  RIO queue index
Returns:
Index of the RIO transmit queue, or an error
em_status_t ti_em_rx_channel_close ( int  dma_idx,
int  channel_idx 
)

Closes a hardware receive channel in the database.

Precondition:
The hardware receive channel shall be used by the Open-EM process.
If the hardware receive channel is not used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_channelClose PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
channel_idx  Index of the receive channel to be closed. It ranges from 0 to the maximum Rx channel index supported by the Multicore Navigator instance.
Returns:
status, EM_OK on success
em_status_t ti_em_rx_channel_open ( int  dma_idx,
int  channel_idx 
)

Opens a hardware receive channel in the database and configures the hardware registers of this receive channel.

Precondition:
The hardware receive channel shall not be used by another part of the application on the device.
If the hardware receive channel is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_rxChannelOpen PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
channel_idx  Index of the receive channel to be opened. It ranges from 0 to the maximum Rx channel index supported by the Multicore Navigator instance.
Postcondition:
On success, the RX_CHANNEL_GLOBAL_CONFIG_REG register shall be enabled.
Returns:
status, EM_OK on success
em_status_t ti_em_rx_flow_close ( int  dma_idx,
int  flow_idx 
)

Closes a hardware receive flow in the database.

Precondition:
The hardware receive flow shall be used by the Open-EM process.
If the hardware receive flow is not used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_closeRxFlow PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
flow_idx  Index of the receive flow to be closed. It ranges from 0 to the maximum Rx flow index supported by the Multicore Navigator instance.
Returns:
status, EM_OK on success
em_status_t ti_em_rx_flow_open ( int  dma_idx,
int  flow_idx,
int  dst_queue_idx,
int  free_queue_idx,
int  error_handling 
)

Opens a hardware receive flow in the database and configures the hardware registers of this receive flow.

Precondition:
The hardware receive flow shall not be used by another part of the application on the device.
If the hardware receive flow is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_configureRxFlow PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
flow_idx  Index of the receive flow to be opened. It ranges from 0 to the maximum Rx flow index supported by the Multicore Navigator instance.
dst_queue_idx  Index of the destination queue.
free_queue_idx  Index of the destination free queue. Queue indexes range from 0 to the maximum queue index supported by the Multicore Navigator instance.
error_handling  Receive flow error handling mode when starvation occurs.
0 = Starvation errors result in dropping the packet.
1 = Starvation errors result in a subsequent retry.
Postcondition:
On success, the RX_FLOW_CONFIG registers shall be correctly configured.
Returns:
status, EM_OK on success
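For illustration, opening flow 0 of PktDMA instance 0 so that received packets land on a destination queue with free descriptors drawn from a free queue, dropping packets on starvation (both queue indexes are placeholders):

    int dst_queue_idx  = 0;  /* placeholder */
    int free_queue_idx = 0;  /* placeholder */
    em_status_t status = ti_em_rx_flow_open(0,              /* dma_idx  */
                                            0,              /* flow_idx */
                                            dst_queue_idx,
                                            free_queue_idx,
                                            0);             /* drop on starvation */
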
em_status_t ti_em_set_ps_words ( em_event_t  event,
uint32_t *  ps_word_ptr,
size_t  ps_wsize 
)

Helper function which sets the ps words in the event.

Parameters:
event  Event handle.
ps_word_ptr  Pointer to ps words.
ps_wsize  Number of ps words.
Returns:
status, EM_OK on success
em_status_t ti_em_set_ps_wsize ( em_event_t  event,
size_t  ps_wsize 
)

Helper function which sets the number of ps words in the event.

Parameters:
event  Event handle.
ps_wsize  Number of ps words.
Returns:
status, EM_OK on success
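A round-trip sketch for the protocol-specific (ps) words, assuming the event can carry two of them:

    uint32_t ps_in[2] = { 0xdeadbeefu, 0x12345678u };
    ti_em_set_ps_words(event, ps_in, 2);     /* attach two ps words */

    size_t   ps_wsize = ti_em_get_ps_wsize(event);
    uint32_t ps_out[8];                      /* any buffer large enough */
    ti_em_get_ps_words(event, ps_out, ps_wsize);
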
void ti_em_set_queue ( em_event_t  event,
em_queue_t  queue_hdl,
em_event_group_t  event_group_hdl 
)

Helper function which sets the queue and event group information in the event.

Parameters:
event  Event handle.
queue_hdl  Queue handle to set.
event_group_hdl  Event group handle to set.
void ti_em_set_type ( em_event_t  event,
em_event_type_t  event_type 
)

Helper function which sets the event type in the event.

Parameters:
event  Event handle.
event_type  Event type to set.
ti_em_pair_t ti_em_split ( em_event_t  event,
ti_em_iterator_t iterator_ptr 
)

Splits one scattered event into two events.
When returned, the iterator buffer points to the last buffer of pair.head_event (the pair can be recombined right away).
The second and following buffers of pair.tail_event are cache coherent.

Parameters:
event  Event to split.
iterator_ptr  Pointer to the iterator where the split starts.
Returns:
Events pair.
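Split and combine are inverses around an iterator position; a sketch (the split point chosen here, after the first buffer, is arbitrary):

    ti_em_iterator_t iterator;
    ti_em_iterator_start(event, &iterator);
    ti_em_iterator_next(&iterator);          /* move to where the split should occur */

    ti_em_pair_t pair = ti_em_split(event, &iterator);
    /* pair.head_event and pair.tail_event are now two separate events */

    em_event_t recombined = ti_em_combine(pair, &iterator);
    ti_em_iterator_stop(&iterator);
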
void ti_em_tag_set_queue ( uint32_t *  tag_ptr,
em_queue_t  queue_hdl 
)

Helper function which sets the queue information in the event tag location.

Parameters:
tag_ptr  Pointer to tag location.
queue_hdl  Queue handle to set.
void ti_em_tag_set_type ( uint32_t *  tag_ptr,
em_event_type_t  event_type 
)

Helper function which sets the event type in the tag location.

Parameters:
tag_ptr  Pointer to tag location.
event_type  Event type to set.
ti_em_packet_t* ti_em_to_packet ( em_event_t  event)

Helper function that converts an event handle into a packet.

Parameters:
event  Event handle.
Returns:
Pointer to the packet.
em_status_t ti_em_tx_channel_close ( int  dma_idx,
int  channel_idx 
)

Closes a hardware transmit channel in the database.

Precondition:
The hardware transmit channel shall be used by the Open-EM process.
If the hardware transmit channel is not used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_channelClose PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
channel_idx  Index of the transmit channel to be closed. It ranges from 0 to the maximum Tx channel index supported by the Multicore Navigator instance.
Returns:
status, EM_OK on success
em_status_t ti_em_tx_channel_open ( int  dma_idx,
int  channel_idx 
)

Opens a hardware transmit channel in the database and configures the hardware registers of this transmit channel.

Precondition:
The hardware transmit channel shall not be used by another part of the application on the device.
If the hardware transmit channel is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_txChannelOpen PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
channel_idx  Index of the transmit channel to be opened. It ranges from 0 to the maximum Tx channel index supported by the Multicore Navigator instance.
Postcondition:
On success, the TX_CHANNEL_GLOBAL_CONFIG_REG_A register shall be enabled.
The TX_CHANNEL_SCHEDULER_CONFIG_REG_PRIORITY register shall be configured to the desired value.
Returns:
status, EM_OK on success
em_status_t ti_em_unregister_trace_handler ( void  )

Unregisters the trace handler.

Returns:
Success code.
em_status_t ti_em_xge_rx_channel_close ( int  dma_idx,
int  channel_idx 
)

Closes a hardware XGE receive channel in the database.

Precondition:
The hardware receive channel shall be used by the Open-EM process.
If the hardware receive channel is not used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_channelClose PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
channel_idx  Index of the receive channel to be closed. It ranges from 0 to the maximum Rx channel index supported by the Multicore Navigator instance.
Returns:
status, EM_OK on success
em_status_t ti_em_xge_rx_channel_open ( int  dma_idx,
int  channel_idx 
)

Opens a hardware XGE receive channel in the database and configures the hardware registers of this receive channel.

Precondition:
The hardware receive channel shall not be used by another part of the application on the device.
If the hardware receive channel is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_rxChannelOpen PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
channel_idx  Index of the receive channel to be opened. It ranges from 0 to the maximum Rx channel index supported by the Multicore Navigator instance.
Postcondition:
On success, the RX_CHANNEL_GLOBAL_CONFIG_REG register shall be enabled.
Returns:
status, EM_OK on success
em_status_t ti_em_xge_rx_flow_close ( int  dma_idx,
int  flow_idx 
)

Closes a hardware XGE receive flow in the database.

Precondition:
The hardware receive flow shall be used by the Open-EM process.
If the hardware receive flow is not used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_closeRxFlow PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
flow_idx  Index of the receive flow to be closed. It ranges from 0 to the maximum Rx flow index supported by the Multicore Navigator instance.
Returns:
status, EM_OK on success
em_status_t ti_em_xge_rx_flow_open ( int  dma_idx,
int  flow_idx,
int  dst_queue_idx,
int  free_queue_idx0,
int  free_queue_idx1,
int  error_handling 
)

Opens a hardware XGE receive flow in the database and configures the hardware registers of this receive flow.

Precondition:
The hardware receive flow shall not be used by another part of the application on the device.
If the hardware receive flow is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_configureRxFlow PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
flow_idx  Index of the receive flow to be opened. It ranges from 0 to the maximum Rx flow index supported by the Multicore Navigator instance.
dst_queue_idx  Index of the destination queue.
free_queue_idx0  Index of the first destination free queue. Queue indexes range from 0 to the maximum queue index supported by the Multicore Navigator instance.
free_queue_idx1  Index of the second destination free queue.
error_handling  Receive flow error handling mode when starvation occurs.
0 = Starvation errors result in dropping the packet.
1 = Starvation errors result in a subsequent retry.
Postcondition:
On success, the RX_FLOW_CONFIG registers shall be correctly configured.
Returns:
status, EM_OK on success
em_status_t ti_em_xge_rx_miss_disable ( ti_em_xge_rx_miss_type_t  miss_type)

Disables miss redirection for the Ethertype/service type.

Parameters:
miss_type  Miss type to disable.
Returns:
status, EM_OK on success
em_status_t ti_em_xge_rx_miss_enable ( ti_em_xge_rx_miss_type_t  miss_type)

Enables miss redirection for the Ethertype/service type.

Parameters:
miss_type  Miss type to enable.
Returns:
status, EM_OK on success
em_status_t ti_em_xge_tx_channel_close ( int  dma_idx,
int  channel_idx 
)

Closes a hardware XGE transmit channel in the database.

Precondition:
The hardware transmit channel shall be used by the Open-EM process.
If the hardware transmit channel is not used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_channelClose PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
channel_idx  Index of the transmit channel to be closed. It ranges from 0 to the maximum Tx channel index supported by the Multicore Navigator instance.
Returns:
status, EM_OK on success
em_status_t ti_em_xge_tx_channel_open ( int  dma_idx,
int  channel_idx 
)

Opens a hardware XGE transmit channel in the database and configures the hardware registers of this transmit channel.

Precondition:
The hardware transmit channel shall not be used by another part of the application on the device.
If the hardware transmit channel is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the Cppi_txChannelOpen PDK function.
Parameters:
dma_idx  Index of the DMA instance. This refers to the Multicore Navigator PktDMA instance.
channel_idx  Index of the transmit channel to be opened. It ranges from 0 to the maximum Tx channel index supported by the Multicore Navigator instance.
Postcondition:
On success, the TX_CHANNEL_GLOBAL_CONFIG_REG_A register shall be enabled.
The TX_CHANNEL_SCHEDULER_CONFIG_REG_PRIORITY register shall be configured to the desired value.
Returns:
status, EM_OK on success
ti_em_queue_id_t ti_em_xge_tx_queue_base_idx_get ( void  )

Returns the first hardware XGE transmit queue among TI_EM_XGE_VLAN_PRIO_NUM (8).

Precondition:
The hardware transmit queue shall not be used by another part of the application on the device.
If the hardware transmit queue is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file.
Returns:
Index of the first XGE transmit queue
ti_em_queue_id_t ti_em_xge_tx_queue_open ( ti_em_queue_id_t  queue_base_idx,
int  vlan_priority 
)

Opens a hardware XGE transmit queue in the database and configures the hardware registers of this transmit queue.

Precondition:
The hardware transmit queue shall not be used by another part of the application on the device.
If the hardware transmit queue is already used, the function is not successful.
Remarks:
The implementation is left to the user. An example is provided in the em_pdk_hal.c file using the ti_em_hw_queue_open function.
Parameters:
queue_base_idx  Index of the first XGE TX queue
vlan_priority  VLAN priority.
Returns:
Index of the XGE transmit queue
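A closing sketch that opens one XGE transmit queue per VLAN priority, starting from the base index:

    ti_em_queue_id_t queue_base_idx = ti_em_xge_tx_queue_base_idx_get();
    ti_em_queue_id_t tx_queue_idx[TI_EM_XGE_VLAN_PRIO_NUM];
    int vlan_priority;

    for (vlan_priority = 0; vlan_priority < TI_EM_XGE_VLAN_PRIO_NUM; vlan_priority++) {
        tx_queue_idx[vlan_priority] =
            ti_em_xge_tx_queue_open(queue_base_idx, vlan_priority);
    }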