};
#define DMA_BUFFER_NUM_PAGES ((8 * 1024 * 1024) / PAGE_SIZE)
-#define PEXOR_DMA_MAXPOLLS 1000000
+#define PEXOR_DMA_MAXPOLLS 10000
#define PEXOR_DMA_POLLDELAY 0
#define PEXOR_MEMWRITE_SIZE 128
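+/* DMA_BUFFER_NUM_PAGES sizes an 8 MiB buffer (2048 pages with a 4 KiB
+ * PAGE_SIZE). With PEXOR_DMA_POLLDELAY == 0 the DMA completion wait is a
+ * pure busy-poll: at most PEXOR_DMA_MAXPOLLS reads of dma_control_stat
+ * before the driver gives up and reports a kernel timeout. */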
}
/* allocate memwrite_buffer */
- privdata->memwrite_buffer = vmalloc_32(PEXOR_MEMWRITE_SIZE * 4);
+ privdata->memwrite_buffer = vmalloc_32_user(PEXOR_MEMWRITE_SIZE * 4);
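+ /* the _user variant returns zeroed, VM_USERMAP-tagged memory, so the
+ * buffer can later be remapped to userspace (remap_vmalloc_range())
+ * without leaking kernel data */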
if (privdata->memwrite_buffer == NULL) {
pexor_msg(KERN_ERR "ERROR> probe allocate memwrite_buffer failed\n");
return -ENOMEM;
}
return 0;
}
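+/* bytes currently available to pexor_read() in the DMA buffer; set by the
+ * DMA and FIFO transfer paths below and consumed by each successful read */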
+static ssize_t pexor_read_buffer_ctr;
+
ssize_t pexor_read(struct file * filp,
char __user * buf, size_t count, loff_t * f_pos)
{
return -EFAULT;
}
if (count > privdata->dma.size) {
+ pexor_msg(KERN_ERR "ERROR> pexor_read: requested count (%d) > Max: %d\n",
+ (int)count, (int)privdata->dma.size);
+ return -EFAULT;
+ }
+ if (count > pexor_read_buffer_ctr) {
+ pexor_msg(KERN_ERR "ERROR> pexor_read: requested count (%d) > "
+ "Buffer: %d\n",
+ (int)count, (int)pexor_read_buffer_ctr);
return -EFAULT;
}
if (down_interruptible(&privdata->sem) != 0) {
return -ERESTARTSYS;
}
if (copy_to_user(buf, privdata->dma.buffer, count)) {
+ pexor_msg(KERN_ERR "ERROR> pexor_read: copy_to_user failed\n");
retval = -EFAULT;
goto out_read;
}
+ /* only account the bytes as consumed once the copy has succeeded */
+ pexor_read_buffer_ctr -= count;
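+ /* usage sketch (hypothetical userspace caller): start a transfer via the
+ * driver ioctl, then read back at most the transferred byte count, e.g.
+ *   ssize_t n = read(fd, buf, nbytes);   with nbytes <= last transfer size;
+ * oversized requests are rejected with EFAULT by the checks above */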
priv->dma.nr_sglist = 0;
/* allocate DMA buffer */
- priv->dma.buffer = vmalloc_32(size);
+ priv->dma.buffer = vmalloc_32_user(size);
if (priv->dma.buffer == NULL) {
pexor_msg(KERN_ERR
"ERROR> pexor_alloc_dma_buffer: vmalloc failed buffer\n");
unsigned int ctr = 0;
volatile u32 val = 0;
unsigned int i;
-
+
if (channel >= PEXOR_TRB_NUM_CHANNELS)
return -1;
-
+
+ pexor_read_buffer_ctr = 0;
for (sg_ctr = 0; sg_ctr < priv->dma.nr_sglist; sg_ctr++) {
- /* sync current sg-entry to CPU */
- dma_sync_single_for_cpu(priv->class_dev,
- sg_dma_address(&priv->dma.sglist[sg_ctr]),
- sg_dma_len(&priv->dma.sglist[sg_ctr]),
- DMA_FROM_DEVICE);
sg_length = sg_dma_len(&priv->dma.sglist[sg_ctr]);
for (i = 0; i < sg_length; i++) {
timeout = 0;
do {
val = ioread32(priv->pexor.trbnet_receiver_data[channel]);
if ((val & MASK_FIFO_TRB_ACT) == 0) {
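+ /* ctr counts 32-bit FIFO words read so far; publish the byte count
+ * for pexor_read() */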
+ pexor_read_buffer_ctr += ctr * 4;
return ctr;
}
} while (((val & MASK_FIFO_VALID) == 0) &&
#endif
ctr++;
}
-
+
}
/* should not happen, out of buffers etc. */
if (descriptor.dma != 0 && channel == 3) {
/* only channel 3 supports DMA */
-
+ pexor_read_buffer_ctr = 0;
+
/* Start DMA transfer */
pexor_dbg(KERN_ERR "Start DMA transfer\n");
for (sg_ctr = 0; sg_ctr < priv->dma.nr_sglist; sg_ctr++) {
unsigned int loops = 0;
mb();
- /* do we need this?? */
- /*
- dma_sync_single_for_device(priv->class_dev,
- sg_dma_address(&priv->dma.sglist[sg_ctr]),
- sg_dma_len(&priv->dma.sglist[sg_ctr]),
- DMA_FROM_DEVICE);
- */
+ /* sync current sg-entry to device */
+ dma_sync_single_for_device(priv->class_dev,
+ sg_dma_address(&priv->dma.sglist[sg_ctr]),
+ sg_dma_len(&priv->dma.sglist[sg_ctr]),
+ DMA_FROM_DEVICE);
+
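+ /* the device owns the buffer during the transfer; the CPU must not
+ * touch it again until it is synced back with dma_sync_single_for_cpu() */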
iowrite32(sg_dma_address(&priv->dma.sglist[sg_ctr]),
priv->pexor.dma_dest);
iowrite32(sg_dma_len(&priv->dma.sglist[sg_ctr]) / 4,
priv->pexor.dma_len);
/* wait for dma complete */
- for (loops = 0; loops < PEXOR_DMA_MAXPOLLS * 100; loops++) {
+ for (loops = 0; loops < PEXOR_DMA_MAXPOLLS; loops++) {
dmastat = ioread32(priv->pexor.dma_control_stat);
- //pexor_msg(KERN_ERR "DMA: Status: is: 0x%08x %d\n", dmastat, loops);
mb();
if ((dmastat & PEXOR_TRB_BIT_DMA_FINISHED) != 0) {
/* DMA is completed */
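+ /* status bits 31:8 report the transferred length in 32-bit words
+ * (dmaSize * 4 below converts it to bytes) */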
dmaSize = dmastat >> 8;
if (dmaSize == 0) {
- pexor_msg(KERN_ERR "DMA: Zero Length Error, Status: 0x%08x\n", dmastat);
+ pexor_msg(KERN_ERR "DMA: Zero Length Error, Status: 0x%08x\n",
+ dmastat);
}
break;
}
if ((dmastat & PEXOR_TRB_BIT_DMA_MORE) != 0) {
/* Card needs more DMA-Buffers */
- //pexor_msg(KERN_ERR "DMA: More Status: 0x%08x\n", dmastat);
break;
}
if ((dmastat & PEXOR_TRB_BIT_DMA_TIMEOUT) != 0) {
/* TRBNet Timeout */
- //pexor_msg(KERN_ERR "DMA: Timeout Status: 0x%08x\n", dmastat);
pexor_msg(KERN_ERR
"ERROR> wait_dma_complete: TRBNet Timeout Bit set "
"Status: 0x%08x\n",
(unsigned int)dmastat);
+ /* reset DMA */
+ iowrite32(PEXOR_TRB_DMA_RESET, priv->pexor.dma_control_stat);
+ /* do we need to flush the fifo-buffer? */
status = -EFAULT;
- goto OUT_DMA;
+ goto OUT_IOCTL;
}
}
/* Check for kernel timeout */
"(delay %d ns) for dma complete! Status: 0x%08x\n",
PEXOR_DMA_MAXPOLLS, PEXOR_DMA_POLLDELAY,
(unsigned int)dmastat);
+ /* reset DMA */
+ iowrite32(PEXOR_TRB_DMA_RESET, priv->pexor.dma_control_stat);
/* do we need to flush the fifo-buffer? */
status = -EFAULT;
- goto OUT_DMA;
+ goto OUT_IOCTL;
}
/* sync current sg-entry to CPU */
if ((sg_ctr + 1) >= priv->dma.nr_sglist) {
pexor_msg(KERN_ERR
"ERROR> no more DMA buffers available, aborting DMA\n");
+ /* reset DMA */
+ iowrite32(PEXOR_TRB_DMA_RESET, priv->pexor.dma_control_stat);
/* do we need to flush the fifo-buffer? */
status = -EFAULT;
- goto OUT_DMA;
+ goto OUT_IOCTL;
}
}
status = dmaSize;
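+ /* dmaSize counts 32-bit words; expose the byte count to pexor_read() */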
+ pexor_read_buffer_ctr = dmaSize * 4;
#ifdef PEXOR_TRB_DEBUG
{
} else {
/* do FIFO transfer to DMA Buffer */
- pexor_dbg(KERN_ERR "Start FIFO copy to DMA buffer\n");
status = pexor_copy_fifo_to_dma(priv, channel);
+ pexor_dbg(KERN_ERR "FIFO copy to DMA buffer returned Size: %d\n", status);
if (status == -1) {
status = -EFAULT;
goto OUT_IOCTL;
}
}
-OUT_DMA:
-
OUT_IOCTL:
spin_unlock(&priv->dma_lock);
return status;