mirror of
https://github.com/tbsdtv/linux_media.git
synced 2025-07-23 20:51:03 +02:00
thunderbolt: Export ring handling functions to modules
These are used by Thunderbolt services to send and receive frames over the high-speed DMA rings. We also put the functions to tb_ namespace to make sure we do not collide with others and add missing kernel-doc comments for the exported functions. Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com> Reviewed-by: Michael Jamet <michael.jamet@intel.com> Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com> Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
committed by
David S. Miller
parent
9fb1e654dc
commit
3b3d9f4da9
@@ -15,10 +15,12 @@
|
||||
#define THUNDERBOLT_H_
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/uuid.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
enum tb_cfg_pkg_type {
|
||||
TB_CFG_PKG_READ = 1,
|
||||
@@ -397,4 +399,160 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
|
||||
return tb_to_xdomain(svc->dev.parent);
|
||||
}
|
||||
|
||||
/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	struct mutex lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};
|
||||
|
||||
/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of the allocated descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 */
struct tb_ring {
	struct mutex lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	u16 sof_mask;
	u16 eof_mask;
};
|
||||
|
||||
/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)
|
||||
|
||||
struct ring_frame;
/* Completion callback invoked once a frame has finished (canceled is
 * true when the ring was stopped before the frame completed). */
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
|
||||
|
||||
/**
 * struct ring_frame - For use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};
|
||||
|
||||
/* Minimum size for ring_rx */
#define TB_FRAME_SIZE		0x100
|
||||
|
||||
/* Ring allocation, lifecycle control and teardown */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask,
				 u16 eof_mask);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

/* Internal helper; use tb_ring_rx()/tb_ring_tx() instead */
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
|
||||
|
||||
/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy and @frame->callback have to be set. The
 * buffer must contain at least %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	/* Warn (but still enqueue) if this is mistakenly a Tx ring */
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
|
||||
|
||||
/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy, @frame->callback, @frame->size,
 * @frame->eof and @frame->sof have to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	/* Warn (but still enqueue) if this is mistakenly an Rx ring */
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
|
||||
|
||||
#endif /* THUNDERBOLT_H_ */
|
||||
|
Reference in New Issue
Block a user