--- /dev/null
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/smp_lock.h>
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+
+#include "pexor_user.h"
+#include "pexor_trb.h"
+
+//#define PEXOR_DEBUGPRINT
+//#define PEXOR_TRB_DEBUG
+
+#ifdef PEXOR_DEBUGPRINT
+#define pexor_dbg( args... ) \
+ printk( args );
+#else
+#define pexor_dbg( args... ) ;
+#endif
+
+
+#define pexor_msg( args... ) \
+ printk( args );
+
+/* ---------------------------------------------------------------------- */
+
+#define PEXORNAME "pexor"
+#define PEXORNAMEFMT "pexor-%d"
+
+#define PEXOR_VENDOR_ID 0x1204
+#define PEXOR_DEVICE_ID 0x5303
+#define PEXOR_MAX_DEVS 4
+static dev_t pexor_major;
+static struct class *pexor_class;
+static dev_t pexor_devt;
+static int my_major_nr = 0;
+
+/* Register map of one PEXOR board: raw pointers into the ioremapped
+ * BAR window, filled in once by set_pexor(); init_done flags that
+ * this setup happened. */
+struct dev_pexor
+{
+  u32 *irq_control;             /* irq control register */
+  u32 *irq_status;              /* irq status register */
+  u32 *dma_control_stat;        /* dma control and status register */
+  u32 *dma_source;              /* dma source address */
+  u32 *dma_dest;                /* dma destination address */
+  u32 *dma_len;                 /* dma length */
+  u32 *dma_burstsize;           /* dma burstsize, <=0x80 */
+  u32 *dma_statbits;            /* optional further status bits */
+  u32 *dma_credits;             /* credits */
+  u32 *dma_counts;              /* counter values */
+  u32 *ram_start;               /* RAM start */
+  u32 *ram_end;                 /* RAM end */
+  dma_addr_t ram_dma_base;      /* RAM start expressed as dma address */
+  dma_addr_t ram_dma_cursor;    /* cursor for next dma to issue start */
+  /* per-channel TRBnet register banks, one pointer per logical channel: */
+  u32 *trbnet_sender_err[PEXOR_TRB_NUM_CHANNELS];
+  u32 *trbnet_sender_data[PEXOR_TRB_NUM_CHANNELS];
+  u32 *trbnet_receiver_data[PEXOR_TRB_NUM_CHANNELS];
+  u32 *trbnet_sender_ctl[PEXOR_TRB_NUM_CHANNELS];
+  u32 *trbnet_dma_ctl[PEXOR_TRB_NUM_CHANNELS];
+  u32 *trbnet_sender_trigger_info;
+  u32 *dma_debug0;              /* debug registers, see set_pexor() */
+  u32 *dma_debug1;
+
+  unsigned char init_done;      /* object is ready flag */
+};
+
+#define DMA_BUFFER_NUM_PAGES ((8 * 1024 * 1024) / PAGE_SIZE)
+#define PEXOR_DMA_MAXPOLLS 10000
+#define PEXOR_DMA_POLLDELAY 0
+#define PEXOR_MEMWRITE_SIZE 128
+
+/* Book-keeping for the vmalloc'ed DMA receive buffer and its
+ * scatter/gather mapping, built by pexor_alloc_dma_buffer(). */
+struct pexor_dma
+{
+  void *buffer;                 /* vmalloc_32 buffer */
+  size_t size;                  /* buffer size in bytes */
+  int nr_pages;                 /* number of pages spanned by buffer */
+  struct scatterlist *sglist;   /* one sg entry per page */
+  int nr_sglist;                /* entry count returned by dma_map_sg */
+};
+
+/* Per-board driver state, allocated in probe() and kfree'd by
+ * cleanup_device(). */
+struct pexor_privdata
+{
+  atomic_t state;               /* run state of device */
+  dev_t devno;                  /* device number (major and minor) */
+  int devid;                    /* local id (counter number) */
+  struct pci_dev *pdev;         /* PCI device */
+  struct device *class_dev;     /* Class device */
+  struct cdev cdev;             /* char device struct */
+  struct dev_pexor pexor;       /* mapped pexor address pointers */
+  unsigned long bases[6];       /* contains pci resource bases */
+  unsigned long reglen[6];      /* contains pci resource length */
+  void *iomem[6];               /* points to mapped io memory of the bars */
+  struct semaphore sem;         /* lock semaphore; serializes read/write */
+  spinlock_t dma_lock;          /* protects DMA Buffer */
+
+  struct pexor_dma dma;         /* dma buffer */
+  u32 *memwrite_buffer;         /* buffer for register_write_mem */
+  int magic;                    /* magic number to identify irq */
+};
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef PEXOR_DEBUGPRINT
+static unsigned char get_pci_revision(struct pci_dev *dev);
+#endif
+
+ssize_t pexor_sysfs_codeversion_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+void test_pci(struct pci_dev *dev);
+
+void cleanup_device(struct pexor_privdata *priv);
+
+struct pexor_privdata *get_privdata(struct file *filp);
+
+void set_pexor(struct dev_pexor *pg, void *membase, unsigned long bar);
+
+static int probe(struct pci_dev *dev, const struct pci_device_id *id);
+
+int pexor_open(struct inode *inode, struct file *filp);
+
+int pexor_release(struct inode *inode, struct file *filp);
+
+ssize_t pexor_read(struct file *filp, char __user * buf, size_t count,
+ loff_t * f_pos);
+
+ssize_t pexor_write(struct file *filp, const char __user * buf, size_t count,
+ loff_t * f_pos);
+
+loff_t pexor_llseek(struct file *filp, loff_t off, int whence);
+
+int pexor_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int pexor_ioctl_read_register(struct pexor_privdata *priv, unsigned long arg);
+
+int pexor_ioctl_write_register(struct pexor_privdata *priv,
+ unsigned long arg);
+
+int pexor_ioctl_trbnet_request(struct pexor_privdata *priv,
+ unsigned long arg);
+
+int pexor_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+static void remove(struct pci_dev *dev);
+
+static int __init pexor_init(void);
+
+static void __exit pexor_exit(void);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+long pexor_unlocked_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+#endif
+
+static int pexor_alloc_dma_buffer(struct pexor_privdata *priv, size_t size);
+
+static int pexor_free_dma_buffer(struct pexor_privdata *priv);
+
+/* ---------------------------------------------------------------------- */
+
+/* File operations of the pexor char device.  Kernels >= 2.6.36 go
+ * through the unlocked_ioctl wrapper, older ones use classic ioctl. */
+static struct file_operations pexor_fops = {
+  .owner = THIS_MODULE,
+  .llseek = pexor_llseek,
+  .read = pexor_read,
+  .write = pexor_write,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+  .unlocked_ioctl = pexor_unlocked_ioctl,
+#else
+  .ioctl = pexor_ioctl,
+#endif
+  .mmap = pexor_mmap,
+  .open = pexor_open,
+  .release = pexor_release,
+};
+
+/* PCI ids this driver binds to: a single vendor/device pair */
+static struct pci_device_id ids[] = {
+  {PCI_DEVICE(PEXOR_VENDOR_ID, PEXOR_DEVICE_ID),},
+  {0,},
+};
+
+static struct pci_driver pci_driver = {
+  .name = PEXORNAME,
+  .id_table = ids,
+  .probe = probe,
+  .remove = remove,
+};
+
+
+
+
+//static DEVICE_ATTR(freebufs, S_IRUGO, pexor_sysfs_freebuffers_show, NULL);
+//static DEVICE_ATTR(usedbufs, S_IRUGO, pexor_sysfs_usedbuffers_show, NULL);
+//static DEVICE_ATTR(rcvbufs, S_IRUGO, pexor_sysfs_rcvbuffers_show, NULL);
+static DEVICE_ATTR(codeversion, S_IRUGO, pexor_sysfs_codeversion_show, NULL);
+//static DEVICE_ATTR(dmaregs, S_IRUGO, pexor_sysfs_dmaregs_show, NULL);
+
+#ifdef PEXOR_WITH_SFP
+static DEVICE_ATTR(sfpregs, S_IRUGO, pexor_sysfs_sfpregs_show, NULL);
+#endif
+
+static atomic_t pexor_numdevs = ATOMIC_INIT(0);
+
+/* ---------------------------------------------------------------------- */
+
+/* Fetch the per-device private data attached to an open file.
+ * Returns NULL when the pexor register map was never initialized. */
+struct pexor_privdata *get_privdata(struct file *filp)
+{
+  struct pexor_privdata *priv = (struct pexor_privdata *)filp->private_data;
+
+  if (!priv->pexor.init_done) {
+    pexor_dbg(KERN_ERR "*** PEXOR structure was not initialized!\n");
+    return NULL;
+  }
+  return priv;
+}
+
+#ifdef PEXOR_DEBUGPRINT
+/* Read the PCI revision id from config space (debug builds only). */
+static unsigned char get_pci_revision(struct pci_dev *dev)
+{
+  u8 revision;
+  pci_read_config_byte(dev, PCI_REVISION_ID, &revision);
+  return revision;
+}
+#endif
+
+/* PCI hot-remove / driver unload callback.
+ * cleanup_device() kfree()s priv, and the DMA scatterlist teardown in
+ * pexor_free_dma_buffer() needs the still-registered class device, so
+ * all buffers must be released BEFORE cleanup_device() runs.  The
+ * previous order (cleanup first) was a use-after-free of priv and an
+ * unmap against an already destroyed device. */
+static void remove(struct pci_dev *dev)
+{
+  struct pexor_privdata *priv = (struct pexor_privdata *)pci_get_drvdata(dev);
+
+  /* release DMA buffer while priv and priv->class_dev are still valid */
+  pexor_free_dma_buffer(priv);
+  if (priv->memwrite_buffer != NULL) {
+    vfree(priv->memwrite_buffer);
+    priv->memwrite_buffer = NULL;
+  }
+  cleanup_device(priv);         /* unregisters cdev/sysfs and kfree()s priv */
+  pexor_msg(KERN_NOTICE "PEXOR pci driver end remove.\n");
+}
+
+/* sysfs 'codeversion' attribute: report driver version and build date
+ * (plus SFP version info when compiled with PEXOR_WITH_SFP). */
+ssize_t pexor_sysfs_codeversion_show(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+  char vstring[1024];
+  ssize_t curs = 0;
+#ifdef PEXOR_WITH_SFP
+  struct dev_pexor *pg;
+#endif
+  struct pexor_privdata *privdata;
+  /* privdata is only dereferenced in the SFP variant below */
+  privdata = (struct pexor_privdata *)dev_get_drvdata(dev);
+  curs = snprintf(vstring, 1024,
+                  "*** This is PEXOR driver version %s build on %s at %s \n\t",
+                  PEXORVERSION, __DATE__, __TIME__);
+#ifdef PEXOR_WITH_SFP
+  /* append SFP version text right after the header line */
+  pg = &(privdata->pexor);
+  pexor_show_version(&(pg->sfp), vstring + curs);
+#endif
+  return snprintf(buf, PAGE_SIZE, "%s\n", vstring);
+}
+
+
+/* Debug helper: dump the properties of all six BARs, probe the sizes
+ * of base address registers 0-3 (write all ones, read back, restore)
+ * and print some config space registers.  All output goes through
+ * pexor_dbg and is compiled out unless PEXOR_DEBUGPRINT is defined.
+ * The previous version repeated the BAR-sizing stanza four times and
+ * tested PCI_BASE_ADDRESS_MEM_PREFETCH twice; both collapsed here. */
+void test_pci(struct pci_dev *dev)
+{
+  int bar = 0;
+  u32 originalvalue = 0;
+  u32 base = 0;
+  u16 comstat = 0;
+  u8 typ = 0;
+
+  pexor_dbg(KERN_NOTICE "\n test_pci found PCI revision number %x\n",
+            get_pci_revision(dev));
+
+  /*********** test the address regions */
+  for (bar = 0; bar < 6; bar++) {
+    pexor_dbg(KERN_NOTICE "Resource %d start=%x\n", bar,
+              (unsigned)pci_resource_start(dev, bar));
+    pexor_dbg(KERN_NOTICE "Resource %d end=%x\n", bar,
+              (unsigned)pci_resource_end(dev, bar));
+    pexor_dbg(KERN_NOTICE "Resource %d len=%x\n", bar,
+              (unsigned)pci_resource_len(dev, bar));
+    pexor_dbg(KERN_NOTICE "Resource %d flags=%x\n", bar,
+              (unsigned)pci_resource_flags(dev, bar));
+    if ((pci_resource_flags(dev, bar) & IORESOURCE_IO)) {
+      /* resource lives in the IO address space */
+      pexor_dbg(KERN_NOTICE " - resource is IO\n");
+    }
+    if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM)) {
+      pexor_dbg(KERN_NOTICE " - resource is MEM\n");
+    }
+    if ((pci_resource_flags(dev, bar) & PCI_BASE_ADDRESS_SPACE_IO)) {
+      pexor_dbg(KERN_NOTICE " - resource is PCI IO\n");
+    }
+    if ((pci_resource_flags(dev, bar) & PCI_BASE_ADDRESS_SPACE_MEMORY)) {
+      pexor_dbg(KERN_NOTICE " - resource is PCI MEM\n");
+    }
+    if ((pci_resource_flags(dev, bar) & PCI_BASE_ADDRESS_MEM_PREFETCH)) {
+      pexor_dbg(KERN_NOTICE " - resource prefetch bit is set \n");
+    }
+    if ((pci_resource_flags(dev, bar) & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+      pexor_dbg(KERN_NOTICE " - resource is 64bit address \n");
+    }
+    if ((pci_resource_flags(dev, bar) & PCI_BASE_ADDRESS_MEM_TYPE_32)) {
+      pexor_dbg(KERN_NOTICE " - resource is 32bit address \n");
+    }
+    if ((pci_resource_flags(dev, bar) & IORESOURCE_PREFETCH)) {
+      pexor_dbg(KERN_NOTICE " - resource is prefetchable \n");
+    }
+    if ((pci_resource_flags(dev, bar) & PCI_BASE_ADDRESS_MEM_TYPE_1M)) {
+      pexor_dbg(KERN_NOTICE " - resource is PCI memtype below 1M \n");
+    }
+  }
+
+  /* probe sizes of BARs 0-3: the config offsets are consecutive dwords
+   * starting at PCI_BASE_ADDRESS_0; original values are restored */
+  for (bar = 0; bar < 4; bar++) {
+    int offset = PCI_BASE_ADDRESS_0 + 4 * bar;
+
+    pci_read_config_dword(dev, offset, &originalvalue);
+    pci_write_config_dword(dev, offset, 0xffffffff);
+    pci_read_config_dword(dev, offset, &base);
+    pci_write_config_dword(dev, offset, originalvalue);
+    pexor_dbg("size of base address %d: %i\n", bar, ~base + 1);
+  }
+
+  /***** here tests of configuration/status register: ******/
+  pci_read_config_word(dev, PCI_COMMAND, &comstat);
+  pexor_dbg("\n**** Command register is: %d\n", comstat);
+  pci_read_config_word(dev, PCI_STATUS, &comstat);
+  pexor_dbg("\n**** Status register is: %d\n", comstat);
+  pci_read_config_byte(dev, PCI_HEADER_TYPE, &typ);
+  pexor_dbg("\n**** Header type is: %d\n", typ);
+}
+
+/* Tear down everything probe() set up for one board: sysfs attribute
+ * and class device, char device, BAR mappings/regions, then kfree the
+ * privdata itself and disable the PCI device.
+ * WARNING: priv is freed here -- callers must not touch it afterwards. */
+void cleanup_device(struct pexor_privdata *priv)
+{
+  int j = 0;
+  struct pci_dev *pcidev;
+
+  if (!priv) {
+    return;
+  }
+
+  /* sysfs device cleanup */
+  if (priv->class_dev) {
+#ifdef PEXOR_WITH_SFP
+    device_remove_file(priv->class_dev, &dev_attr_sfpregs);
+#endif
+    device_remove_file(priv->class_dev, &dev_attr_codeversion);
+    device_destroy(pexor_class, priv->devno);
+    priv->class_dev = 0;
+  }
+
+  /* character device cleanup; cdev.owner is only set once cdev_init ran */
+  if (priv->cdev.owner) {
+    cdev_del(&priv->cdev);
+  }
+  /* NOTE(review): devid 0 never decrements pexor_numdevs here, so the
+   * first board's id is not returned to the pool -- confirm intended */
+  if (priv->devid) {
+    atomic_dec(&pexor_numdevs);
+  }
+  pcidev = priv->pdev;
+  if (!pcidev) {
+    return;
+  }
+
+  /* release every BAR recorded in bases[].
+   * NOTE(review): a BAR whose request_(mem_)region failed during probe
+   * still has bases[j] set and gets released here -- verify */
+  for (j = 0; j < 6; j++) {
+    if (priv->bases[j] == 0)
+      continue;
+    if ((pci_resource_flags(pcidev, j) & IORESOURCE_IO)) {
+      pexor_dbg(KERN_NOTICE " releasing IO region at:%lx -len:%lx \n",
+                priv->bases[j], priv->reglen[j]);
+      release_region(priv->bases[j], priv->reglen[j]);
+    } else {
+      /* memory BAR: unmap first, then give the region back */
+      if (priv->iomem[j] != 0) {
+        pexor_dbg(KERN_NOTICE
+                  " unmapping virtual MEM region at:%lx -len:%lx \n",
+                  (unsigned long)priv->iomem[j], priv->reglen[j]);
+        iounmap(priv->iomem[j]);
+      }
+      pexor_dbg(KERN_NOTICE " releasing MEM region at:%lx -len:%lx \n",
+                priv->bases[j], priv->reglen[j]);
+      release_mem_region(priv->bases[j], priv->reglen[j]);
+    }
+    priv->bases[j] = 0;
+    priv->reglen[j] = 0;
+  }
+  kfree(priv);
+  pci_disable_device(pcidev);
+}
+
+
+/* Fill the dev_pexor register-pointer table from the ioremapped BAR
+ * base address.  Register indices are shifted left by 2 (32 bit
+ * registers); per-channel registers additionally encode the channel
+ * number as (2*i+1) in bits 4..7. */
+void set_pexor(struct dev_pexor *pg, void *membase, unsigned long bar)
+{
+  int i = 0;
+  void *dmabase = 0;
+
+  if (pg == NULL)
+    return;
+  dmabase = membase + PEXOR_DMA_BASE;
+
+#ifdef PEXOR_WITH_SFP
+  set_sfp(&(pg->sfp), membase, bar);
+#endif
+
+  pg->dma_control_stat = (u32 *) (dmabase + (PEXOR_TRB_DMA_CTL << 2));
+  /* NOTE(review): magic offset 0xff0 instead of a named register
+   * constant -- confirm against firmware register map */
+  pg->dma_source = (u32 *) (dmabase + 0xff0);
+  pg->dma_dest = (u32 *) (dmabase + (PEXOR_TRB_DMA_ADD << 2));
+  pg->dma_len = (u32 *) (dmabase + (PEXOR_TRB_DMA_LEN << 2));
+  pg->dma_burstsize = (u32 *) (dmabase + (PEXOR_TRB_DMA_BST << 2));
+  pg->dma_statbits = (u32 *) (dmabase + (PEXOR_TRB_DMA_STA << 2));
+  pg->dma_credits = (u32 *) (dmabase + (PEXOR_TRB_DMA_CRE << 2));
+  pg->dma_counts = (u32 *) (dmabase + (PEXOR_TRB_DMA_CNT << 2));
+
+  pg->trbnet_sender_trigger_info =
+    (u32 *) (membase + (PEXOR_TRB_SENDER_TRIGGER_INFO << 2));
+  for (i = 0; i < PEXOR_TRB_NUM_CHANNELS; i++) {
+    pg->trbnet_sender_err[i] = (u32 *)
+      (membase + ((PEXOR_TRB_SENDER_ERROR | ((i * 2 + 1) << 4)) << 2));
+    pg->trbnet_sender_data[i] = (u32 *)
+      (membase + ((PEXOR_TRB_SENDER_DATA | ((i * 2 + 1) << 4)) << 2));
+    pg->trbnet_receiver_data[i] = (u32 *)
+      (membase + ((PEXOR_TRB_RECEIVER_DATA | ((i * 2 + 1) << 4)) << 2));
+    pg->trbnet_sender_ctl[i] = (u32 *)
+      (membase + ((PEXOR_TRB_SENDER_CONTROL | ((i * 2 + 1) << 4)) << 2));
+    /* NOTE(review): every channel's dma_ctl points at the same
+     * PEXOR_TRB_DMA_CRE register (no channel encoding, CRE not CTL);
+     * looks like a copy/paste from dma_credits -- confirm intended */
+    pg->trbnet_dma_ctl[i] = (u32 *) (membase + (PEXOR_TRB_DMA_CRE << 2));
+  }
+
+  pg->dma_debug0 = (u32 *) (membase + (0xe00 << 2));
+  pg->dma_debug1 = (u32 *) (membase + (0xe01 << 2));
+
+  pg->init_done = 0x1;
+  pexor_dbg(KERN_NOTICE "** Set pexor structure %lx.\n",
+            (long unsigned int)pg);
+}
+
+/* PCI probe: enable the device, request and map its BARs, fill the
+ * register table, register the char device and sysfs node, and
+ * allocate the DMA and memwrite buffers.  Returns 0 on success or a
+ * negative errno; every failure path now undoes what was done so far.
+ * Fixes over the previous version:
+ *  - kmalloc failure left the PCI device enabled;
+ *  - device_create() result is checked with IS_ERR() before use;
+ *  - failures after cdev registration (DMA buffer / memwrite buffer
+ *    allocation) returned without teardown, leaking the device. */
+static int probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+  int err = 0, ix = 0;
+  struct pexor_privdata *privdata;
+
+  pexor_msg(KERN_NOTICE "PEXOR pci driver starts probe...\n");
+  if ((err = pci_enable_device(dev)) != 0) {
+    pexor_msg(KERN_ERR
+              "PEXOR pci driver probe: Error %d enabling PCI device! \n",
+              err);
+    return -ENODEV;
+  }
+  pexor_dbg(KERN_NOTICE "PEXOR Device is enabled.\n");
+
+  /* Set Memory-Write-Invalidate support */
+  if (!pci_set_mwi(dev)) {
+    pexor_dbg(KERN_NOTICE "MWI enabled.\n");
+  } else {
+    pexor_dbg(KERN_NOTICE "MWI not supported.\n");
+  }
+  pci_set_master(dev);          /* NOTE: DMA worked without, but maybe
+                                   depends on bios */
+
+  test_pci(dev);
+
+  /* Allocate and initialize the private data for this device */
+  privdata = kmalloc(sizeof(struct pexor_privdata), GFP_KERNEL);
+  if (privdata == NULL) {
+    pci_disable_device(dev);    /* nothing else acquired yet */
+    return -ENOMEM;
+  }
+  memset(privdata, 0, sizeof(struct pexor_privdata));
+  pci_set_drvdata(dev, privdata);
+  privdata->pdev = dev;
+  privdata->magic = PEXOR_DEVICE_ID;    /* for irq test TODO: what if multiple
+                                           pexors share same irq? */
+
+  atomic_set(&(privdata->state), PEXOR_STATE_STOPPED);
+
+  /* request and, for memory BARs, ioremap every populated BAR */
+  for (ix = 0; ix < 6; ix++) {
+    privdata->bases[ix] = pci_resource_start(dev, ix);
+    privdata->reglen[ix] = pci_resource_len(dev, ix);
+    if (privdata->bases[ix] == 0)
+      continue;
+    if ((pci_resource_flags(dev, ix) & IORESOURCE_IO)) {
+      pexor_dbg(KERN_NOTICE " - Requesting io ports for bar %d\n", ix);
+      if (request_region(privdata->bases[ix], privdata->reglen[ix],
+                         dev->dev.kobj.name) == NULL) {
+        pexor_dbg(KERN_ERR
+                  "I/O address conflict at bar %d for device \"%s\"\n", ix,
+                  dev->dev.kobj.name);
+        cleanup_device(privdata);
+        return -EIO;
+      }
+      pexor_dbg("requested ioport at %lx with length %lx\n",
+                privdata->bases[ix], privdata->reglen[ix]);
+    } else if ((pci_resource_flags(dev, ix) & IORESOURCE_MEM)) {
+      pexor_dbg(KERN_NOTICE " - Requesting memory region for bar %d\n", ix);
+      if (request_mem_region(privdata->bases[ix], privdata->reglen[ix],
+                             dev->dev.kobj.name) == NULL) {
+        pexor_dbg(KERN_ERR
+                  "Memory address conflict at bar %d for device \"%s\"\n", ix,
+                  dev->dev.kobj.name);
+        cleanup_device(privdata);
+        return -EIO;
+      }
+      pexor_dbg("requested memory at %lx with length %lx\n",
+                privdata->bases[ix], privdata->reglen[ix]);
+      privdata->iomem[ix] =
+        ioremap_nocache(privdata->bases[ix], privdata->reglen[ix]);
+      if (privdata->iomem[ix] == NULL) {
+        pexor_dbg(KERN_ERR
+                  "Could not remap memory at bar %d for device \"%s\"\n", ix,
+                  dev->dev.kobj.name);
+        cleanup_device(privdata);
+        return -EIO;
+      }
+      pexor_dbg("remapped memory to %lx with length %lx\n",
+                (unsigned long)privdata->iomem[ix], privdata->reglen[ix]);
+    }
+  }                             /* for */
+  set_pexor(&(privdata->pexor), privdata->iomem[0], privdata->bases[0]);
+
+  sema_init(&(privdata->sem), 1);
+  spin_lock_init(&(privdata->dma_lock));
+
+  /* chardev registration: take the next free minor slot */
+  privdata->devid = atomic_inc_return(&pexor_numdevs) - 1;
+  if (privdata->devid >= PEXOR_MAX_DEVS) {
+    pexor_msg(KERN_ERR
+              "Maximum number of devices reached! Increase MAXDEVICES.\n");
+    cleanup_device(privdata);
+    return -ENOMSG;
+  }
+
+  privdata->devno
+    = MKDEV(MAJOR(pexor_devt), MINOR(pexor_devt) + privdata->devid);
+
+  /* Register character device */
+  cdev_init(&(privdata->cdev), &pexor_fops);
+  privdata->cdev.owner = THIS_MODULE;
+  privdata->cdev.ops = &pexor_fops;
+  err = cdev_add(&privdata->cdev, privdata->devno, 1);
+  if (err) {
+    pexor_msg("Couldn't add character device.\n");
+    cleanup_device(privdata);
+    return err;
+  }
+
+  if (!IS_ERR(pexor_class)) {
+    /* driver init had successfully created class, now we create device: */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+    privdata->class_dev = device_create(pexor_class, NULL, privdata->devno,
+                                        privdata, PEXORNAMEFMT,
+                                        MINOR(pexor_devt) + privdata->devid);
+#else
+    privdata->class_dev = device_create(pexor_class, NULL, privdata->devno,
+                                        PEXORNAMEFMT,
+                                        MINOR(pexor_devt) + privdata->devid);
+#endif
+    if (IS_ERR(privdata->class_dev)) {
+      /* do not keep an ERR_PTR around: cleanup_device() tests this
+       * pointer for truth and would call device_destroy() on it */
+      pexor_msg(KERN_ERR "Could not create PEXOR device node!\n");
+      privdata->class_dev = NULL;
+    } else {
+      dev_set_drvdata(privdata->class_dev, privdata);
+      pexor_msg(KERN_NOTICE "Added PEXOR device: %s-%d\n",
+                PEXORNAME, MINOR(pexor_devt) + privdata->devid);
+
+      if (device_create_file(privdata->class_dev,
+                             &dev_attr_codeversion) != 0) {
+        pexor_msg(KERN_ERR
+                  "Could not add device file node for code version.\n");
+      }
+#ifdef PEXOR_WITH_SFP
+      if (device_create_file(privdata->class_dev, &dev_attr_sfpregs) != 0) {
+        pexor_msg(KERN_ERR
+                  "Could not add device file node for sfp registers.\n");
+      }
+#endif
+    }
+  } else {
+    /* something was wrong at class creation, we skip sysfs device
+       support here: */
+    pexor_msg(KERN_ERR "Could not add PEXOR device node to /dev !");
+  }
+
+  /* init pexor_dma buffer; undo the registration above on failure */
+  if (pexor_alloc_dma_buffer(privdata, DMA_BUFFER_NUM_PAGES * PAGE_SIZE) != 0) {
+    pexor_msg(KERN_ERR "ERROR> probe init dma failed\n");
+    cleanup_device(privdata);
+    return -EFAULT;
+  }
+
+  /* allocate memwrite_buffer */
+  privdata->memwrite_buffer = vmalloc_32(PEXOR_MEMWRITE_SIZE * 4);
+  if (privdata->memwrite_buffer == NULL) {
+    pexor_msg(KERN_ERR "ERROR> probe allocate memwrite_buffer failed\n");
+    /* free the dma mapping while class_dev is still registered */
+    pexor_free_dma_buffer(privdata);
+    cleanup_device(privdata);
+    return -ENOMEM;
+  }
+
+  pexor_msg(KERN_NOTICE "probe has finished.\n");
+  return 0;
+}
+
+/* File open: attach the owning device's private data to the file so
+ * later read/write/ioctl calls can find it. */
+int pexor_open(struct inode *inode, struct file *filp)
+{
+  struct pexor_privdata *priv;
+
+  pexor_dbg(KERN_NOTICE "** starting pexor_open...\n");
+  priv = container_of(inode->i_cdev, struct pexor_privdata, cdev);
+  filp->private_data = priv;
+  return 0;
+}
+
+/* File close: nothing to release, open() only stored a pointer. */
+int pexor_release(struct inode *inode, struct file *filp)
+{
+  pexor_dbg(KERN_NOTICE "** starting pexor_release...\n");
+  return 0;
+}
+
+/* Seeking is a no-op: offset and whence are ignored and 0 is returned
+ * (read/write below do not use the file position as a buffer offset). */
+loff_t pexor_llseek(struct file * filp, loff_t off, int whence)
+{
+  return 0;
+}
+
+/* read(): copy the first 'count' bytes of the driver's DMA buffer to
+ * user space, serialized against write() via priv->sem.
+ * NOTE(review): *f_pos is advanced but NOT used as a copy offset --
+ * every read starts at the buffer start; confirm this is the intended
+ * user-space API. */
+ssize_t pexor_read(struct file * filp,
+                   char __user * buf, size_t count, loff_t * f_pos)
+{
+  /* here we read from dma buffer */
+  struct pexor_privdata *privdata;
+  ssize_t retval = 0;
+
+  pexor_dbg(KERN_NOTICE "** starting pexor_read for f_pos=%d count=%d\n",
+            (int)*f_pos, (int)count);
+  privdata = get_privdata(filp);
+  if (privdata == NULL) {
+    return -EFAULT;
+  }
+  /* never copy beyond the end of the dma buffer */
+  if (count > privdata->dma.size) {
+    return -EFAULT;
+  }
+
+  if (down_interruptible(&privdata->sem) != 0) {
+    return -ERESTARTSYS;
+  }
+  if (copy_to_user(buf, privdata->dma.buffer, count)) {
+    retval = -EFAULT;
+    goto out_read;
+  }
+  *f_pos += count;
+  retval = count;
+
+out_read:
+  up(&privdata->sem);
+  return retval;
+}
+
+/* write(): copy up to 4*PEXOR_MEMWRITE_SIZE bytes from user space into
+ * the driver's memwrite bounce buffer, serialized via priv->sem.
+ * NOTE(review): like read(), *f_pos is advanced but not used as an
+ * offset -- every write fills the buffer from its start; confirm. */
+ssize_t pexor_write(struct file * filp,
+                    const char __user * buf, size_t count, loff_t * f_pos)
+{
+  struct pexor_privdata *privdata;
+  ssize_t retval = 0;
+
+  pexor_dbg(KERN_NOTICE "** starting pexor_write for f_pos=%d count=%d\n",
+            (int)*f_pos, (int)count);
+  privdata = get_privdata(filp);
+  if (privdata == NULL) {
+    return -EFAULT;
+  }
+  /* bound by the fixed size of memwrite_buffer */
+  if (count > 4 * PEXOR_MEMWRITE_SIZE) {
+    return -EFAULT;
+  }
+
+  if (down_interruptible(&privdata->sem) != 0) {
+    return -ERESTARTSYS;
+  }
+  if (copy_from_user((void *)privdata->memwrite_buffer, buf, count)) {
+    retval = -EFAULT;
+    goto out_read;
+  }
+  *f_pos += count;
+  retval = count;
+
+out_read:
+  up(&privdata->sem);
+  return retval;
+}
+
+/* mmap(): map the vmalloc'ed DMA buffer into user space one page at a
+ * time via remap_pfn_range (vmalloc memory is not physically
+ * contiguous, so the range cannot be mapped with a single call).
+ * NOTE(review): vma->vm_pgoff is ignored and the requested size is not
+ * rounded to a page multiple -- confirm callers always map from 0 with
+ * page-aligned lengths. */
+int pexor_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+  struct pexor_privdata *privdata;
+  char *vmalloc_area_ptr = NULL;
+  unsigned long size;
+  unsigned long start;
+  unsigned long pfn;
+  int ret = 0;
+
+  privdata = get_privdata(filp);
+  pexor_dbg(KERN_NOTICE "** starting pexor_mmap...\n");
+
+  if (privdata == NULL)
+    return -EFAULT;
+
+  vmalloc_area_ptr = privdata->dma.buffer;
+  start = vma->vm_start;
+  size = (vma->vm_end - vma->vm_start);
+
+  pexor_dbg(KERN_NOTICE
+            "** starting pexor_mmap for s:%lx e:%lx size=%ld\n",
+            vma->vm_start, vma->vm_end, size);
+
+  /* the mapping must not extend past the dma buffer */
+  if (size > privdata->dma.size) {
+    return -EFAULT;
+  }
+
+  /* translate each vmalloc page to its pfn and map it individually */
+  while (size > 0) {
+    pfn = vmalloc_to_pfn(vmalloc_area_ptr);
+    if ((ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED)) < 0) {
+      pexor_dbg(KERN_ERR "kmem remap failed: %d (%p)\n",
+                ret, privdata->dma.buffer);
+      return ret;
+    }
+    start += PAGE_SIZE;
+    vmalloc_area_ptr += PAGE_SIZE;
+    size -= PAGE_SIZE;
+  }
+
+  return 0;
+}
+
+/* PEXOR_IOC_READ_REGISTER: read one u32 from a BAR-relative offset.
+ * The descriptor is copied from user space, bar index and offset are
+ * validated, the register is read and the value copied back in
+ * descriptor.value.  Returns 0, -EIO on bad bar/offset, or the
+ * copy_{from,to}_user remainder. */
+int pexor_ioctl_read_register(struct pexor_privdata *priv, unsigned long arg)
+{
+  int retval = 0;
+  u32 *ad = 0;
+  u32 val = 0;
+  int bar = 0;
+  struct pexor_reg_io descriptor;
+
+  retval = copy_from_user(&descriptor, (void __user *)arg,
+                          sizeof(struct pexor_reg_io));
+  if (retval)
+    return retval;
+  ad = (u32 *) (ptrdiff_t) descriptor.address;
+  pexor_dbg(KERN_NOTICE "** pexor_ioctl_reading from register address %p\n",
+            ad);
+  bar = descriptor.bar;
+  /* bar comes from user space; the old check (bar > 5) let a negative
+   * value index iomem[] out of bounds */
+  if ((bar < 0) || (bar > 5) || priv->iomem[bar] == 0) {
+    pexor_msg(KERN_ERR "** pexor_ioctl_read_register: no mapped bar %d\n",
+              bar);
+    return -EIO;
+  }
+  pexor_dbg(KERN_NOTICE
+            "** pexor_ioctl_read_register reads from address %p within "
+            "bar %d \n", ad, bar);
+  /* the full 32 bit access must fit into the mapped region (the old
+   * check allowed a read straddling the end of the bar) */
+  if ((unsigned long)ad + sizeof(u32) > priv->reglen[bar]) {
+    pexor_msg(KERN_ERR
+              "** pexor_ioctl_read_register: address %p is exceeding "
+              "length %lx of bar %d\n", ad, priv->reglen[bar], bar);
+    return -EIO;
+  }
+  ad = (u32 *) ((unsigned long)priv->iomem[bar] + (unsigned long)ad);
+  val = ioread32(ad);
+  mb();
+  ndelay(20);
+  pexor_dbg(KERN_NOTICE
+            "** pexor_ioctl_read_register read value %x from mapped "
+            "PCI address %p !\n", val, ad);
+  descriptor.value = val;
+  retval = copy_to_user((void __user *)arg, &descriptor,
+                        sizeof(struct pexor_reg_io));
+  return retval;
+}
+
+/* PEXOR_IOC_WRITE_REGISTER: write one u32 to a BAR-relative offset.
+ * The descriptor (bar, address, value) is copied from user space and
+ * validated before the register write.  Returns 0, -EIO on bad
+ * bar/offset, or the copy_from_user remainder. */
+int pexor_ioctl_write_register(struct pexor_privdata *priv, unsigned long arg)
+{
+  int retval = 0;
+  u32 *ad = 0;
+  u32 val = 0;
+  int bar = 0;
+  struct pexor_reg_io descriptor;
+
+  retval = copy_from_user(&descriptor, (void __user *)arg,
+                          sizeof(struct pexor_reg_io));
+  if (retval)
+    return retval;
+  /* here we assume something for this very connection, to be adjusted later */
+  ad = (u32 *) (ptrdiff_t) descriptor.address;
+  val = (u32) descriptor.value;
+  bar = descriptor.bar;
+  /* bar comes from user space; the old check (bar > 5) let a negative
+   * value index iomem[] out of bounds */
+  if ((bar < 0) || (bar > 5) || priv->iomem[bar] == 0) {
+    pexor_msg(KERN_ERR "** pexor_ioctl_write_register: no mapped bar %d\n",
+              bar);
+    return -EIO;
+  }
+  pexor_dbg(KERN_NOTICE
+            "** pexor_ioctl_write_register writes value %x to address %p "
+            "within bar %d \n", val, ad, bar);
+  /* the full 32 bit access must fit into the mapped region */
+  if ((unsigned long)ad + sizeof(u32) > priv->reglen[bar]) {
+    pexor_msg(KERN_ERR
+              "** pexor_ioctl_write_register: address %p is exceeding "
+              "length %lx of bar %d\n", ad, priv->reglen[bar], bar);
+    return -EIO;
+  }
+  ad = (u32 *) ((unsigned long)priv->iomem[bar] + (unsigned long)ad);
+  pexor_dbg(KERN_NOTICE
+            "** pexor_ioctl_write_register writes value %x to mapped "
+            "PCI address %p !\n", val, ad);
+  iowrite32(val, ad);
+  mb();
+  ndelay(20);
+  return retval;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+/* The Big Kernel Lock (lock_kernel/unlock_kernel) was removed in
+ * kernel 2.6.39, so guarding this >=2.6.36 path with it cannot build
+ * on the very kernels it targets.  Serialize ioctls with a private
+ * mutex instead (mutex API available since 2.6.16). */
+static DEFINE_MUTEX(pexor_ioctl_mutex);
+
+long pexor_unlocked_ioctl(struct file *filp,
+                          unsigned int cmd, unsigned long arg)
+{
+  long ret;
+
+  mutex_lock(&pexor_ioctl_mutex);
+  ret = pexor_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+  mutex_unlock(&pexor_ioctl_mutex);
+
+  return ret;
+}
+#endif
+
+/* Dispatch ioctl commands to the implementation helpers.  Returns the
+ * helper's result, -EFAULT when the device is not initialized, or
+ * -ENOTTY for unknown commands. */
+int pexor_ioctl(struct inode *inode, struct file *filp,
+                unsigned int cmd, unsigned long arg)
+{
+  struct pexor_privdata *privdata = get_privdata(filp);
+
+  if (privdata == NULL)
+    return -EFAULT;
+
+  switch (cmd) {
+  /* common register access ioctls: */
+  case PEXOR_IOC_WRITE_REGISTER:
+    pexor_dbg(KERN_NOTICE "** pexor_ioctl write register\n");
+    return pexor_ioctl_write_register(privdata, arg);
+
+  case PEXOR_IOC_READ_REGISTER:
+    pexor_dbg(KERN_NOTICE "** pexor_ioctl read register\n");
+    return pexor_ioctl_read_register(privdata, arg);
+
+  /* protocol-specific ioctls: */
+  case PEXOR_IOC_TRBNET_REQUEST:
+    pexor_dbg(KERN_NOTICE "** pexor_ioctl trbnet request \n");
+    return pexor_ioctl_trbnet_request(privdata, arg);
+
+  default:
+    return -ENOTTY;
+  }
+}
+
+/* Module init: register the chrdev region (fixed major if my_major_nr
+ * was set, dynamic otherwise), create the sysfs class and register the
+ * PCI driver.  Device nodes themselves are created at probe time.
+ * Fixes: pexor_major is now recorded (pexor_exit() uses it, but it was
+ * never assigned, so exit unregistered major 0); the class is
+ * destroyed again when pci_register_driver fails. */
+static int __init pexor_init(void)
+{
+  int result;
+
+  pexor_msg(KERN_NOTICE "pexor driver init...\n");
+
+  pexor_devt = MKDEV(my_major_nr, 0);
+
+  /*
+   * Register your major, and accept a dynamic number.
+   */
+  if (my_major_nr) {
+    result = register_chrdev_region(pexor_devt, PEXOR_MAX_DEVS, PEXORNAME);
+  } else {
+    result = alloc_chrdev_region(&pexor_devt, 0, PEXOR_MAX_DEVS, PEXORNAME);
+    my_major_nr = MAJOR(pexor_devt);
+  }
+  if (result < 0)
+    return result;
+  /* keep the module-global major in sync for pexor_exit() */
+  pexor_major = my_major_nr;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
+  pexor_class = class_create(THIS_MODULE, PEXORNAME);
+  if (IS_ERR(pexor_class)) {
+    pexor_msg(KERN_ALERT "Could not create class for sysfs support!\n");
+  }
+#endif
+
+  if (pci_register_driver(&pci_driver) < 0) {
+    pexor_msg(KERN_ALERT "pci driver could not register!\n");
+    if (pexor_class && !IS_ERR(pexor_class))
+      class_destroy(pexor_class);
+    unregister_chrdev_region(pexor_devt, PEXOR_MAX_DEVS);
+    return -EIO;
+  }
+
+  pexor_msg(KERN_NOTICE
+            "\t\tdriver init with registration for major no %d done.\n",
+            my_major_nr);
+  return 0;
+
+  /* note: actual assignment will be done on probe time */
+}
+
+#if 0
+/* Disabled alternative implementation of pexor_init() with goto-based
+ * error unwinding; kept for reference only, never compiled. */
+int __init pexor_init(void)
+{
+  dev_t dev;
+  int result;
+  pexor_msg(KERN_NOTICE "pexor driver init...\n");
+
+  /* Register a dynamic major number */
+  result = alloc_chrdev_region(&dev, 0, PEXOR_MAX_DEVS, PEXORNAME);
+  if (result != 0) {
+    pexor_msg(KERN_ALERT "failed to get a major number for pexor\n");
+    goto out_init;
+  }
+  pexor_major = MAJOR(dev);
+
+  /* create module class */
+  pexor_class = class_create(THIS_MODULE, PEXORNAME);
+  if (IS_ERR(pexor_class)) {
+    result = PTR_ERR(pexor_class);
+    pexor_msg(KERN_ALERT
+              "could not create class for sysfs support, error = %x\n",
+              result);
+    goto out_unreg_chrdev;
+  }
+
+  /* register PCI driver */
+  result = pci_register_driver(&pci_driver);
+  if (result != 0) {
+    pexor_msg(KERN_ALERT "could not register pexor pci driver\n");
+    goto out_unreg_chrdev;
+  }
+
+  pexor_msg(KERN_NOTICE
+            "\t\tdriver init with registration for major no %d done.\n",
+            pexor_major);
+
+  pexor_devt = dev;
+
+  return 0;
+
+  /* note: actual assignment will be done on probe time */
+
+out_unreg_chrdev:
+  unregister_chrdev_region(MKDEV(pexor_major, 0), PEXOR_MAX_DEVS);
+
+out_init:
+  return result;
+}
+#endif
+
+/* Module exit: unregister the PCI driver, destroy the sysfs class and
+ * release the chrdev region.
+ * Fixes: the old code unregistered MKDEV(pexor_major, 0), but
+ * pexor_major is never assigned by the active init path, so it
+ * released major 0 instead of the region actually allocated -- use
+ * pexor_devt, which init always sets.  class_destroy is also guarded
+ * against a failed class_create. */
+static void __exit pexor_exit(void)
+{
+  pexor_msg(KERN_NOTICE "pexor driver exit...\n");
+
+  pci_unregister_driver(&pci_driver);
+  if (pexor_class && !IS_ERR(pexor_class))
+    class_destroy(pexor_class);
+  unregister_chrdev_region(pexor_devt, PEXOR_MAX_DEVS);
+
+  pexor_msg(KERN_NOTICE "\t\tdriver exit done.\n");
+}
+
+
+/* ------ TRBNet Specific ----------------------------------------------- */
+
+/* Undo pexor_alloc_dma_buffer(): unmap the scatterlist (if it was
+ * mapped), free the list and the vmalloc'ed buffer, and reset all dma
+ * bookkeeping fields.  Safe to call on a partially set up state.
+ * NOTE(review): unmaps against priv->class_dev -- must match the
+ * device used in dma_map_sg (see pexor_alloc_dma_buffer). */
+static int pexor_free_dma_buffer(struct pexor_privdata *priv)
+{
+  /* free sglist */
+  if (priv->dma.sglist != NULL) {
+    if (priv->dma.nr_sglist > 0) {
+      /* unmap with the original nents (nr_pages), as the DMA API
+       * requires, not the count returned by dma_map_sg */
+      dma_unmap_sg(priv->class_dev,
+                   priv->dma.sglist, priv->dma.nr_pages, DMA_FROM_DEVICE);
+    }
+    kfree(priv->dma.sglist);
+  }
+
+  /* free DMA-Buffer */
+  if (priv->dma.buffer != NULL) {
+    vfree(priv->dma.buffer);
+  }
+
+  priv->dma.buffer = NULL;
+  priv->dma.size = 0;
+  priv->dma.nr_pages = 0;
+  priv->dma.sglist = NULL;
+  priv->dma.nr_sglist = 0;
+
+  pexor_dbg(KERN_NOTICE "INFO> pexor_free_dma_buffer: success\n");
+  return 0;
+}
+
+/* Allocate the driver's receive DMA buffer: vmalloc_32 'size' bytes,
+ * build a per-page scatterlist by hand and map it for DMA_FROM_DEVICE.
+ * Returns 0 on success, negative errno on failure (everything already
+ * allocated is freed again via pexor_free_dma_buffer).
+ * NOTE(review): sg entries are filled directly (page_link / offset /
+ * length) instead of sg_init_table() + sg_set_page(), so the sg end
+ * marker is never set -- confirm this is safe on the targeted kernels.
+ * NOTE(review): dma_map_sg is called on the sysfs class device, not on
+ * &priv->pdev->dev; class devices normally carry no dma_mask/dma ops,
+ * so verify the mapping device is really intended. */
+static int pexor_alloc_dma_buffer(struct pexor_privdata *priv, size_t size)
+{
+  struct page *pg = NULL;
+  unsigned long uaddr = 0;
+  unsigned int i;
+
+  priv->dma.buffer = NULL;
+  priv->dma.size = 0;
+  priv->dma.nr_pages = 0;
+  priv->dma.sglist = NULL;
+  priv->dma.nr_sglist = 0;
+
+  /* allocate DMA-Buffer */
+  priv->dma.buffer = vmalloc_32(size);
+  if (priv->dma.buffer == NULL) {
+    pexor_msg(KERN_ERR
+              "ERROR> pexor_alloc_dma_buffer: vmalloc failed buffer\n");
+    return -ENOMEM;
+  }
+  memset(priv->dma.buffer, 0, size);
+  uaddr = (unsigned long)priv->dma.buffer;
+  /* number of pages touched by [uaddr, uaddr+size) */
+  priv->dma.nr_pages =
+    ((uaddr + size + PAGE_SIZE - 1) >> PAGE_SHIFT) - (uaddr >> PAGE_SHIFT);
+  priv->dma.size = size;
+
+  /* allocate scatterlist */
+  priv->dma.sglist =
+    kcalloc(priv->dma.nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
+  if (priv->dma.sglist == NULL) {
+    pexor_msg(KERN_ERR
+              "ERROR> pexor_alloc_dma_buffer: kmalloc failed scatterlist\n");
+    pexor_free_dma_buffer(priv);
+    return -ENOMEM;
+  }
+  memset(priv->dma.sglist, 0,
+         sizeof(struct scatterlist) * priv->dma.nr_pages);
+
+  /* fill scatterlist: one entry per vmalloc page */
+  pexor_msg(KERN_ERR
+            "INFO> uaddr: %lx, dma.buffer: %p nr_pages: %d\n",
+            uaddr, priv->dma.buffer, priv->dma.nr_pages);
+  for (i = 0; i < priv->dma.nr_pages; i++) {
+    pg = vmalloc_to_page(priv->dma.buffer + i * PAGE_SIZE);
+    if (pg == NULL) {
+      pexor_msg(KERN_ERR
+                "ERROR> pexor_alloc_dma_buffer: vmalloc_to_page failed\n");
+      pexor_free_dma_buffer(priv);
+      return -EFAULT;
+    }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+    /* >= 2.6.23: sg stores the page in the page_link bitfield */
+    priv->dma.sglist[i].page_link = (unsigned long)pg;
+#else
+    priv->dma.sglist[i].page = pg;
+#endif
+    /* first entry may start mid-page, last entry may end mid-page */
+    priv->dma.sglist[i].offset = (i == 0)
+      ? offset_in_page(uaddr)
+      : 0;
+    priv->dma.sglist[i].length = ((i + 1) == priv->dma.nr_pages)
+      ? (uaddr + priv->dma.size) - ((uaddr + priv->dma.size - 1) & PAGE_MASK)
+      : PAGE_SIZE;
+
+#ifdef PEXOR_TRB_DEBUG
+    pexor_msg(KERN_ERR
+              "INFO> sglist[%d] page: %p, index: %ld, offset: %d, "
+              "length: %d\n",
+              i, pg, pg->index, priv->dma.sglist[i].offset,
+              priv->dma.sglist[i].length);
+#endif
+  }
+  priv->dma.nr_sglist = dma_map_sg(priv->class_dev,
+                                   priv->dma.sglist,
+                                   priv->dma.nr_pages, DMA_FROM_DEVICE);
+  pexor_msg(KERN_ERR "INFO> nr_sglist: %d\n", priv->dma.nr_sglist);
+
+  if (priv->dma.nr_sglist == 0) {
+    pexor_msg(KERN_ERR "ERROR> pexor_alloc_dma_buffer: dma_map_sg failed\n");
+    pexor_free_dma_buffer(priv);
+    return -EFAULT;
+  }
+  pexor_dbg(KERN_NOTICE "INFO> pexor_alloc_dma_buffer: success\n");
+
+  return 0;
+}
+
/* TRBNet FIFO status bits (in the value read from trbnet_receiver_data) */
#define MASK_FIFO_VALID (0x01 << 24)    /* FIFO word contains valid data */
#define MASK_FIFO_TRB_ACT (0x01 << 25)  /* TRBNet transfer still active */
#define MAX_FIFO_TIMEOUT 5000000        /* polling iterations before giving up */
/* TRBNet header framing: header type field and terminator value */
#define SHIFT_HEADER_TYPE 0x0000
#define MASK_HEADER_TYPE (0x0007 << SHIFT_HEADER_TYPE)
#define HEADER_TRM 0x0003
+
+static int pexor_copy_fifo_to_dma(struct pexor_privdata *priv,
+ unsigned int channel)
+{
+ unsigned int sg_ctr;
+ unsigned int sg_length;
+ unsigned int timeout = 0;
+ unsigned int ctr = 0;
+ volatile u32 val = 0;
+ unsigned int i;
+
+ if (channel >= PEXOR_TRB_NUM_CHANNELS)
+ return -1;
+
+ for (sg_ctr = 0; sg_ctr < priv->dma.nr_sglist; sg_ctr++) {
+ /* sync current sg-entry to CPU */
+ dma_sync_single_for_cpu(priv->class_dev,
+ sg_dma_address(&priv->dma.sglist[sg_ctr]),
+ sg_dma_len(&priv->dma.sglist[sg_ctr]),
+ DMA_FROM_DEVICE);
+ sg_length = sg_dma_len(&priv->dma.sglist[sg_ctr]);
+ for (i = 0; i < sg_length; i++) {
+ timeout = 0;
+ do {
+ val = ioread32(priv->pexor.trbnet_receiver_data[channel]);
+ if ((val & MASK_FIFO_TRB_ACT) == 0) {
+ return ctr;
+ }
+ } while (((val & MASK_FIFO_VALID) == 0) &&
+ (++timeout < MAX_FIFO_TIMEOUT));
+ if (timeout >= MAX_FIFO_TIMEOUT) {
+ pexor_msg(KERN_ERR
+ "ERROR> copy_fifo_to_dma: FIFO Timeout channel %d\n",
+ channel);
+ pexor_msg(KERN_ERR "ERROR> last val: 0x%08x\n", val);
+
+ /* to be done: flush FIFO here */
+ return -1;
+ }
+ ((u32 *) priv->dma.buffer)[ctr] = val;
+#ifdef PEXOR_TRB_DEBUG
+ pexor_msg(KERN_ERR "FIFO COPY: %d: 0x%08x channel: %d\n",
+ ctr, ((u32 *) priv->dma.buffer)[ctr], channel);
+#endif
+ ctr++;
+ }
+
+ }
+
+ /* should not happen,out of buffers etc */
+ /* to be done: flush FIFO here */
+ return -1;
+}
+
+int pexor_ioctl_trbnet_request(struct pexor_privdata *priv, unsigned long arg)
+{
+ int status = -1;
+ unsigned int channel = 3;
+ int command;
+ volatile u32 dmastat = 0;
+ unsigned int sg_ctr = 0;
+ unsigned int dmaSize = 0;
+ struct pexor_trbnet_io descriptor;
+
+ spin_lock((&(priv->dma_lock)));
+
+ if (copy_from_user(&descriptor,
+ (void __user *)arg, sizeof(struct pexor_trbnet_io))) {
+ status = -EFAULT;
+ goto OUT_IOCTL;
+ }
+
+ /* execute command */
+ command = descriptor.command;
+
+ switch (command) {
+ case PEXOR_TRBNETCOM_REG_WRITE:
+ // first send trbnet request
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_err[3]);
+ iowrite32(descriptor.reg_address, priv->pexor.trbnet_sender_data[3]);
+ iowrite32((descriptor.arg0 >> 16) & 0xffff,
+ priv->pexor.trbnet_sender_data[3]);
+ iowrite32(descriptor.arg0 & 0xffff, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32((((u32) descriptor.trb_address << 16) |
+ PEXOR_TRB_CMD_REGISTER_WRITE),
+ priv->pexor.trbnet_sender_ctl[3]);
+ break;
+
+ case PEXOR_TRBNETCOM_REG_WRITE_MEM:
+ {
+ unsigned int i;
+ if (descriptor.arg1 > PEXOR_MEMWRITE_SIZE) {
+ pexor_msg(KERN_ERR "ERROR> REG_WRITE_MEM: invalid size%x\n", command);
+ status = -EFAULT;
+ goto OUT_IOCTL;
+ }
+
+ // first send trbnet request
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_err[3]);
+ iowrite32(descriptor.reg_address, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(descriptor.arg0, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ for (i = 0; (i < descriptor.arg1) && (i < PEXOR_MEMWRITE_SIZE); i++) {
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32((priv->memwrite_buffer[i] >> 16) & 0xffff,
+ priv->pexor.trbnet_sender_data[3]);
+ iowrite32(priv->memwrite_buffer[i] & 0xffff,
+ priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ }
+ iowrite32((((u32) descriptor.trb_address << 16) |
+ PEXOR_TRB_CMD_REGISTER_WRITE_MEM),
+ priv->pexor.trbnet_sender_ctl[3]);
+ }
+ break;
+
+ case PEXOR_TRBNETCOM_REG_READ:
+ // first send trbnet request
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_err[3]);
+ iowrite32(descriptor.reg_address, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32((((u32) descriptor.trb_address << 16) |
+ PEXOR_TRB_CMD_REGISTER_READ),
+ priv->pexor.trbnet_sender_ctl[3]);
+ break;
+
+ case PEXOR_TRBNETCOM_REG_READ_MEM:
+ // first send trbnet request
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_err[3]);
+ iowrite32(descriptor.reg_address, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(descriptor.arg0, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32((((u32) descriptor.trb_address << 16) |
+ PEXOR_TRB_CMD_REGISTER_READ_MEM),
+ priv->pexor.trbnet_sender_ctl[3]);
+ break;
+
+ case PEXOR_TRBNETCOM_READ_UID:
+ // first send trbnet request
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_err[3]);
+ iowrite32(PEXOR_TRB_NET_READUNIQUEID, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32((((u32) descriptor.trb_address << 16) |
+ PEXOR_TRB_CMD_NETADMINISTRATION),
+ priv->pexor.trbnet_sender_ctl[3]);
+ break;
+
+ case PEXOR_TRBNETCOM_SET_ADDRESS:
+ // first send trbnet request
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_err[3]);
+ iowrite32(PEXOR_TRB_NET_SETADDRESS, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(descriptor.arg0 & 0xffff, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(descriptor.arg0 >> 16 & 0xffff,
+ priv->pexor.trbnet_sender_data[3]);
+ iowrite32(descriptor.arg1 & 0xffff, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(descriptor.arg1 >> 16 & 0xffff,
+ priv->pexor.trbnet_sender_data[3]);
+ iowrite32(descriptor.arg2, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(descriptor.trb_address, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0x00000000, priv->pexor.trbnet_sender_data[3]);
+ iowrite32(0xffff0000 | PEXOR_TRB_CMD_NETADMINISTRATION,
+ priv->pexor.trbnet_sender_ctl[3]);
+ break;
+
+ case PEXOR_TRBNETCOM_IPU_DATA_READ:
+ iowrite32(((descriptor.arg1 & 0xff) << 24) | (descriptor.arg2 & 0xffffff),
+ priv->pexor.trbnet_sender_err[1]);
+ iowrite32((descriptor.arg0 & 0x0f) | PEXOR_TRB_CMD_SHORT_TRANSFER,
+ priv->pexor.trbnet_sender_ctl[1]);
+ channel = 1;
+ break;
+
+ case PEXOR_TRBNETCOM_SEND_TRIGGER:
+ iowrite32(((descriptor.arg1 & 0xff) << 24) | (descriptor.arg2 & 0xffffff),
+ priv->pexor.trbnet_sender_err[0]);
+ iowrite32((descriptor.arg1 >> 8) & 0xffff,
+ priv->pexor.trbnet_sender_trigger_info);
+
+ iowrite32((descriptor.arg0 & 0x0f) | PEXOR_TRB_CMD_SHORT_TRANSFER,
+ priv->pexor.trbnet_sender_ctl[0]);
+ channel = 0;
+ break;
+
+ default:
+ pexor_msg(KERN_ERR "ERROR> invalid request %x\n", command);
+ status = -EFAULT;
+ goto OUT_IOCTL;
+ };
+
+ if (descriptor.dma != 0 && channel == 3) {
+ /* only channel 3 supports DMA */
+
+ /* Start DMA transfer */
+ pexor_dbg(KERN_ERR "Start DMA transfer\n");
+ for (sg_ctr = 0; sg_ctr < priv->dma.nr_sglist; sg_ctr++) {
+ unsigned int loops = 0;
+
+ mb();
+ /* do we need this?? */
+ /*
+ dma_sync_single_for_device(priv->class_dev,
+ sg_dma_address(&priv->dma.sglist[sg_ctr]),
+ sg_dma_len(&priv->dma.sglist[sg_ctr]),
+ DMA_FROM_DEVICE);
+ */
+ iowrite32(sg_dma_address(&priv->dma.sglist[sg_ctr]),
+ priv->pexor.dma_dest);
+ iowrite32(sg_dma_len(&priv->dma.sglist[sg_ctr]) / 4,
+ priv->pexor.dma_len);
+
+ /* wait for dma complete */
+ for (loops = 0; loops < PEXOR_DMA_MAXPOLLS * 100; loops++) {
+ dmastat = ioread32(priv->pexor.dma_control_stat);
+ mb();
+ if (((dmastat & PEXOR_DMA_ENABLED_BIT) == 0) && (dmastat != 0)) {
+ break;
+ }
+ if ((dmastat & PEXOR_TRB_BIT_DMA_MORE) != 0)
+ break;
+ }
+ if (loops == PEXOR_DMA_MAXPOLLS * 100) {
+ pexor_msg(KERN_ERR
+ "ERROR> wait_dma_complete: polling longer than %d cycles "
+ "(delay %d ns) for dma complete! Status: 0x%x\n",
+ PEXOR_DMA_MAXPOLLS, PEXOR_DMA_POLLDELAY,
+ (unsigned int)dmastat);
+ /* reset DMA */
+ iowrite32(PEXOR_TRB_DMA_RESET, priv->pexor.dma_control_stat);
+ /* do we need to flush the fifo-buffer? */
+ status = -EFAULT;
+ goto OUT_IOCTL;
+ }
+
+ /* sync current sg-entry to CPU */
+ dma_sync_single_for_cpu(priv->class_dev,
+ sg_dma_address(&priv->dma.sglist[sg_ctr]),
+ sg_dma_len(&priv->dma.sglist[sg_ctr]),
+ DMA_FROM_DEVICE);
+ mb();
+
+ /* check status: do we need another dma for data */
+ if ((dmastat & PEXOR_DMA_ENABLED_BIT) == 0) {
+ dmaSize = dmastat >> 8; //was +=
+ break;
+ }
+ if ((sg_ctr + 1) >= priv->dma.nr_sglist) {
+ pexor_msg(KERN_ERR
+ "ERROR> no more DMA buffers available, aborting DMA\n");
+ /* reset DMA */
+ iowrite32(PEXOR_TRB_DMA_RESET, priv->pexor.dma_control_stat);
+ /* do we need to flush the fifo-buffer? */
+ status = -EFAULT;
+ goto OUT_IOCTL;
+ }
+ }
+
+ status = dmaSize;
+
+#ifdef PEXOR_TRB_DEBUG
+ {
+ int i;
+ pexor_msg(KERN_ERR "DMA: dmaSize: %d\n", dmaSize);
+ for (i = 0; i < dmaSize + 20; i++) {
+ u32 val = 0;
+ val = ioread32((i % 2 == 0)
+ ? priv->pexor.dma_debug0 : priv->pexor.dma_debug1);
+ pexor_msg(KERN_ERR "DMA: %d 0x%08x DEBUG:0x%08x\n", i,
+ ((u32 *) priv->dma.buffer)[i], val);
+ }
+ }
+#endif
+ } else {
+ /* do FIFO transfer to DMA Buffer */
+ pexor_dbg(KERN_ERR "Start FIFO copy to DMA buffer\n");
+ status = pexor_copy_fifo_to_dma(priv, channel);
+ if (status == -1) {
+ status = -EFAULT;
+ goto OUT_IOCTL;
+ }
+ }
+
+OUT_IOCTL:
+ spin_unlock((&(priv->dma_lock)));
+ return status;
+}
+
+/* ---------------------------------------------------------------------- */
+
/* module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joern Adamczewski-Musch / Ludwig Maier");
MODULE_DESCRIPTION("pexor device driver for HADES TRBNet");
MODULE_VERSION("2.0.0");
/* export the PCI id table so userspace tooling can autoload the driver */
MODULE_DEVICE_TABLE(pci, ids);

/* driver entry / exit hooks (defined earlier in this file) */
module_init(pexor_init);
module_exit(pexor_exit);