#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include <linux/sched.h>
#include "pexor_user.h"
#include "pexor_trb.h"
//#define PEXOR_DMA_MAXPOLLS 10000000
// JAM 2016: introduced again polling delay to make it independent of host hardware speed...
-#define PEXOR_DMA_MAXPOLLS 100000
+//#define PEXOR_DMA_MAXPOLLS 10000
+// JAM 2017: try with longer polling timeout -> 40 ms maximum
+#define PEXOR_DMA_MAXPOLLS 2000000
/** polling delay for each cycle in ns for dma complete bit*/
-#define PEXOR_DMA_POLLDELAY 20
+#define PEXOR_DMA_POLLDELAY 100
#define PEXOR_MEMWRITE_SIZE 128
+//#define UDELAY_TIME 1
+// JAM2017: maybe we are also sometimes too fast here?
+// 2 did not help really
+// 10 has even more problems with trbnetd cpu load
#define UDELAY_TIME 1
+/** JAM 2016, like the mbspex driver: if set, we call schedule() in the DMA-complete polling loop.
+ * Note: according to the Linux kernel book, yield() merely marks this
+ * task to be rescheduled in the near future, whereas schedule() invokes
+ * the scheduler directly.
+ * This must NOT be enabled if DMA completion is polled in an interrupt tasklet. */
+#define PEXOR_DMA_POLL_SCHEDULE 1
+
+
+
struct pexor_dma
{
void* buffer;
return -1;
}
+/** Drain stale words from the trbnet receiver FIFO of the given channel.
+ *
+ * Outer loop: as long as the FIFO-valid bit is seen, keep reading words.
+ * Inner loop: poll the channel's receiver-data register (ioread32 + rmb
+ * + 20 ns delay) until a valid word appears or 1000 reads time out.
+ * Each flushed word is logged and counted; flushing aborts after 10000
+ * words to avoid an endless loop on a stuck FIFO.
+ *
+ * @param priv    driver private data holding mapped register pointers
+ * @param channel trbnet channel index; must be < PEXOR_TRB_NUM_CHANNELS
+ * @return 0 on completion (including the abort path), -1 for a bad channel
+ *
+ * NOTE(review): busy-waits with ndelay(); presumably called from process
+ * context (e.g. the DMA timeout path), not from an interrupt tasklet —
+ * confirm against callers.
+ */
+int pexor_flush_fifo(struct pexor_privdata* priv,
+ unsigned int channel)
+{
+
+ /** JAM2017 this is mostly taken from fifo_flush of trbnet.c, adapted for driver registers:*/
+ volatile u32 val = 0; /* last word read from the receiver-data register */
+ unsigned int counter = 0; /* total FIFO words flushed so far */
+ unsigned int timeout = 0; /* inner-loop poll count, reset each outer pass */
+ if (channel >= PEXOR_TRB_NUM_CHANNELS)
+ return -1;
+ do {
+ timeout = 0;
+ do {
+ val = ioread32(priv->pexor.trbnet_receiver_data[channel]);
+ rmb(); /* order the register read before testing the valid bit */
+ ndelay(20);
+ } while (((val & MASK_FIFO_VALID) == 0) &&
+ (++timeout < 1000));
+ /* DEBUG INFO */
+//#ifdef PEXOR_TRB_DEBUG
+ if ((val & MASK_FIFO_VALID) != 0) {
+ pexor_msg(KERN_ERR "FLUSH FIFO: %d: 0x%08x channel: %d\n",
+ counter, val, channel);
+ counter++;
+ }
+
+//#endif
+ if(counter>10000){
+ pexor_msg(KERN_ERR "too many fifo counts %d , abort flushing\n",counter);
+ break; /* safety valve: FIFO never drains, give up */
+ }
+ } while ((val & MASK_FIFO_VALID) != 0);
+ pexor_msg(KERN_ERR "FLUSH FIFO returns with timeout:%d: counter:%d\n",
+ timeout, counter);
+return 0;
+}
+
+
+
+
int pexor_ioctl_trbnet_request(struct pexor_privdata* priv,
unsigned long arg)
{
priv->pexor.trbnet_sender_trigger_info);
udelay(UDELAY_TIME);
iowrite32_mb((descriptor.arg0 & 0x0f) | PEXOR_TRB_CMD_SHORT_TRANSFER,
- priv->pexor.trbnet_sender_ctl[0]);
+ priv->pexor.trbnet_sender_ctl[0]);
udelay(UDELAY_TIME);
channel = 0;
break;
// JAM2016: try some slight waitstate before rereading the status
if (PEXOR_DMA_POLLDELAY)
ndelay(PEXOR_DMA_POLLDELAY);
+ if (PEXOR_DMA_POLL_SCHEDULE)
+ schedule (); // also allow system scheduler to run something else during our polling
}
/* sync current sg-entry back to CPU */
- dma_sync_single_for_cpu(priv->class_dev,
+ /*dma_sync_single_for_cpu(priv->class_dev,
sg_dma_address(&priv->dma.sglist[sg_ctr]),
sg_dma_len(&priv->dma.sglist[sg_ctr]),
DMA_FROM_DEVICE);
+ */
mb();
/* Check for kernel timeout */
if (loops == PEXOR_DMA_MAXPOLLS) {
pexor_msg(KERN_ERR
"ERROR> wait_dma_complete: polling longer than %d cycles "
- "(ndelay %d nanos) for dma complete! Status: 0x%08x\n",
+ "(ndelay%d nanos) for dma complete! Status: 0x%08x\n",
PEXOR_DMA_MAXPOLLS, PEXOR_DMA_POLLDELAY,
(unsigned int)dmastat);
/* reset DMA */
iowrite32_mb(PEXOR_TRB_DMA_RESET, priv->pexor.dma_control_stat);
udelay(1000);
/* do we need to flush the fifo-buffer, no libtrbnet takes care */
- status = -202;
- goto OUT_IOCTL;
+ pexor_flush_fifo(priv,3); /* JAM2017*/
+ pexor_msg(KERN_ERR "Fifo has been flushed!\n");
+ status = -202;
+ /*goto OUT_IOCTL;*/
}
-
+ /* JAM 2017: moved after maxpoll check - sync current sg-entry back to CPU */
+ dma_sync_single_for_cpu(priv->class_dev,
+ sg_dma_address(&priv->dma.sglist[sg_ctr]),
+ sg_dma_len(&priv->dma.sglist[sg_ctr]),
+ DMA_FROM_DEVICE);
+
+ mb();
+ if(status==-202) goto OUT_IOCTL;
+
/* Check status: do we need another dma for data */
if (dmaSize > 0) {
/* No, end DMA */